repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
caiuspb/home-assistant | homeassistant/util/template.py | 1 | 3232 |
"""
homeassistant.util.template
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Template utility methods for rendering strings with HA data.
"""
# pylint: disable=too-few-public-methods
import json
import logging
import jinja2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import STATE_UNKNOWN
from homeassistant.exceptions import TemplateError
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
def render_with_possible_json_value(hass, template, value,
error_value=_SENTINEL):
""" Renders template with value exposed.
If valid JSON will expose value_json too. """
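# Illustrative note (hypothetical values, not part of the original module):
# rendering the template "{{ value_json.temperature }}" with the raw value
# '{"temperature": 21}' exposes both `value` (the raw string) and
# `value_json` (the parsed dict), so the render returns "21".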
variables = {
'value': value
}
try:
variables['value_json'] = json.loads(value)
except ValueError:
pass
try:
return render(hass, template, variables)
except TemplateError:
_LOGGER.exception('Error parsing value')
return value if error_value is _SENTINEL else error_value
def render(hass, template, variables=None, **kwargs):
""" Render given template. """
if variables is not None:
kwargs.update(variables)
try:
return ENV.from_string(template, {
'states': AllStates(hass),
'is_state': hass.states.is_state
}).render(kwargs).strip()
except jinja2.TemplateError as err:
raise TemplateError(err)
class AllStates(object):
""" Class to expose all HA states as attributes. """
def __init__(self, hass):
self._hass = hass
def __getattr__(self, name):
return DomainStates(self._hass, name)
def __iter__(self):
return iter(sorted(self._hass.states.all(),
key=lambda state: state.entity_id))
def __call__(self, entity_id):
state = self._hass.states.get(entity_id)
return STATE_UNKNOWN if state is None else state.state
class DomainStates(object):
""" Class to expose a specific HA domain as attributes. """
def __init__(self, hass, domain):
self._hass = hass
self._domain = domain
def __getattr__(self, name):
return self._hass.states.get('{}.{}'.format(self._domain, name))
def __iter__(self):
return iter(sorted(
(state for state in self._hass.states.all()
if state.domain == self._domain),
key=lambda state: state.entity_id))
def forgiving_round(value, precision=0):
""" Rounding method that accepts strings. """
try:
value = round(float(value), precision)
return int(value) if precision == 0 else value
except ValueError:
# If value can't be converted to float
return value
def multiply(value, amount):
""" Converts to float and multiplies value. """
try:
return float(value) * amount
except ValueError:
# If value can't be converted to float
return value
class TemplateEnvironment(ImmutableSandboxedEnvironment):
""" Home Assistant template environment. """
def is_safe_callable(self, obj):
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
ENV = TemplateEnvironment()
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
| mit | 4,492,025,769,520,362,500 | 27.60177 | 74 | 0.631188 | false |
digwanderlust/pants | src/python/pants/backend/project_info/tasks/depmap.py | 1 | 8699 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.backend.jvm.ivy_utils import IvyModuleRef
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import deprecated
from pants.base.exceptions import TaskError
class Depmap(ConsoleTask):
"""Generates either a textual dependency tree or a graphviz digraph dot file for the dependency
set of a target.
"""
class SourceRootTypes(object):
"""Defines SourceRoot Types Constants"""
SOURCE = 'SOURCE' # Source Target
TEST = 'TEST' # Test Target
SOURCE_GENERATED = 'SOURCE_GENERATED' # Code Gen Source Targets
EXCLUDED = 'EXCLUDED' # Excluded Target
RESOURCE = 'RESOURCE' # Resource belonging to Source Target
TEST_RESOURCE = 'TEST_RESOURCE' # Resource belonging to Test Target
@staticmethod
def _jar_id(jar):
if jar.rev:
return '{0}:{1}:{2}'.format(jar.org, jar.name, jar.rev)
else:
return '{0}:{1}'.format(jar.org, jar.name)
@staticmethod
def _address(address):
"""
:type address: pants.base.address.SyntheticAddress
"""
return '{0}:{1}'.format(address.spec_path, address.target_name)
@classmethod
def register_options(cls, register):
super(Depmap, cls).register_options(register)
register('--internal-only', default=False, action='store_true',
help='Specifies that only internal dependencies should be included in the graph '
'output (no external jars).')
register('--external-only', default=False, action='store_true',
help='Specifies that only external dependencies should be included in the graph '
'output (only external jars).')
register('--minimal', default=False, action='store_true',
help='For a textual dependency tree, only prints a dependency the 1st '
'time it is encountered. This is a no-op for --graph.')
register('--graph', default=False, action='store_true',
help='Specifies the internal dependency graph should be output in the dot digraph '
'format.')
register('--tree', default=False, action='store_true',
help='For text output, show an ascii tree to help visually line up indentions.')
register('--show-types', default=False, action='store_true',
help='Show types of objects in depmap --graph.')
register('--separator', default='-',
help='Specifies the separator to use between the org/name/rev components of a '
'dependency\'s fully qualified name.')
register('--path-to',
help='Show only items on the path to the given target. This is a no-op for --graph.')
def __init__(self, *args, **kwargs):
super(Depmap, self).__init__(*args, **kwargs)
self.is_internal_only = self.get_options().internal_only
self.is_external_only = self.get_options().external_only
if self.is_internal_only and self.is_external_only:
raise TaskError('At most one of --internal-only or --external-only can be selected.')
self.is_minimal = self.get_options().minimal
self.is_graph = self.get_options().graph
self.should_tree = self.get_options().tree
self.show_types = self.get_options().show_types
self.path_to = self.get_options().path_to
self.separator = self.get_options().separator
self.target_aliases_map = None
def console_output(self, targets):
if len(self.context.target_roots) == 0:
raise TaskError("One or more target addresses are required.")
for target in self.context.target_roots:
out = self._output_digraph(target) if self.is_graph else self._output_dependency_tree(target)
for line in out:
yield line
def _dep_id(self, dependency):
"""Returns a tuple of dependency_id, is_internal_dep."""
params = dict(sep=self.separator)
if isinstance(dependency, JarDependency):
# TODO(kwilson): handle 'classifier' and 'type'.
params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
is_internal_dep = False
else:
params.update(org='internal', name=dependency.id)
is_internal_dep = True
return ('{org}{sep}{name}{sep}{rev}' if params.get('rev') else
'{org}{sep}{name}').format(**params), is_internal_dep
def _enumerate_visible_deps(self, dep, predicate):
dep_id, internal = self._dep_id(dep)
dependencies = sorted([x for x in getattr(dep, 'dependencies', [])]) + sorted(
[x for x in getattr(dep, 'jar_dependencies', [])] if not self.is_internal_only else [])
for inner_dep in dependencies:
dep_id, internal = self._dep_id(inner_dep)
if predicate(internal):
yield inner_dep
def output_candidate(self, internal):
return ((not self.is_internal_only and not self.is_external_only)
or (self.is_internal_only and internal)
or (self.is_external_only and not internal))
def _output_dependency_tree(self, target):
"""Plain-text depmap output handler."""
def make_line(dep, indent, is_dupe=False):
indent_join, indent_chars = ('--', ' |') if self.should_tree else ('', ' ')
dupe_char = '*' if is_dupe else ''
return ''.join((indent * indent_chars, indent_join, dupe_char, dep))
def output_deps(dep, indent, outputted, stack):
dep_id, internal = self._dep_id(dep)
if self.path_to:
# If we hit the search target from self.path_to, yield the stack items and bail.
if dep_id == self.path_to:
for dep_id, indent in stack + [(dep_id, indent)]:
yield make_line(dep_id, indent)
return
else:
if not (dep_id in outputted and self.is_minimal) and self.output_candidate(internal):
yield make_line(dep_id,
0 if self.is_external_only else indent,
is_dupe=dep_id in outputted)
outputted.add(dep_id)
for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate):
for item in output_deps(sub_dep, indent + 1, outputted, stack + [(dep_id, indent)]):
yield item
for item in output_deps(target, 0, set(), []):
yield item
def _output_digraph(self, target):
"""Graphviz format depmap output handler."""
color_by_type = {}
def maybe_add_type(dep, dep_id):
"""Add a class type to a dependency id if --show-types is passed."""
return dep_id if not self.show_types else '\\n'.join((dep_id, dep.__class__.__name__))
def make_node(dep, dep_id, internal):
line_fmt = ' "{id}" [style=filled, fillcolor={color}{internal}];'
int_shape = ', shape=ellipse' if not internal else ''
dep_class = dep.__class__.__name__
if dep_class not in color_by_type:
color_by_type[dep_class] = len(color_by_type.keys()) + 1
return line_fmt.format(id=dep_id, internal=int_shape, color=color_by_type[dep_class])
def make_edge(from_dep_id, to_dep_id, internal):
style = ' [style=dashed]' if not internal else ''
return ' "{}" -> "{}"{};'.format(from_dep_id, to_dep_id, style)
def output_deps(dep, parent, parent_id, outputted):
dep_id, internal = self._dep_id(dep)
if dep_id not in outputted:
yield make_node(dep, maybe_add_type(dep, dep_id), internal)
outputted.add(dep_id)
for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate):
for item in output_deps(sub_dep, dep, dep_id, outputted):
yield item
if parent:
edge_id = (parent_id, dep_id)
if edge_id not in outputted:
yield make_edge(maybe_add_type(parent, parent_id), maybe_add_type(dep, dep_id), internal)
outputted.add(edge_id)
yield 'digraph "{}" {{'.format(target.id)
yield ' node [shape=rectangle, colorscheme=set312;];'
yield ' rankdir=LR;'
for line in output_deps(target, parent=None, parent_id=None, outputted=set()):
yield line
yield '}'
| apache-2.0 | 3,285,786,233,342,723,000 | 40.42381 | 99 | 0.651454 | false |
timor-raiman/yas3fs | yas3fs/fuse.py | 1 | 28128 |
# Copyright (c) 2012 Terence Honles <terence@honles.com> (maintainer)
# Copyright (c) 2008 Giorgos Verigakis <[email protected]> (author)
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import *
from os import strerror
from platform import machine, system
from signal import signal, SIGINT, SIG_DFL
from stat import S_IFDIR
from traceback import print_exc
import logging
try:
from functools import partial
except ImportError:
# http://docs.python.org/library/functools.html#functools.partial
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
try:
basestring
except NameError:
basestring = str
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
_system = system()
_machine = machine()
if _system == 'Darwin':
_libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
_libfuse_path = (find_library('fuse4x') or find_library('osxfuse') or
find_library('fuse'))
else:
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
else:
_libfuse = CDLL(_libfuse_path)
if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
_system = 'Darwin-MacFuse'
if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
if _system == 'Darwin':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_ino', c_uint64),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_birthtimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32),
('st_flags', c_int32),
('st_gen', c_int32),
('st_lspare', c_int32),
('st_qspare', c_int64)]
else:
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t)
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t),
('f_fsid', c_ulong),
#('unused', c_int),
('f_flag', c_ulong),
('f_namemax', c_ulong)
]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64)]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
c_off_t, POINTER(fuse_file_info))),
('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
c_off_t, POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('setxattr', setxattr_t),
('getxattr', getxattr_t),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp,
CFUNCTYPE(c_int, c_voidp, c_char_p,
POINTER(c_stat), c_off_t),
c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int,
POINTER(fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
('create', CFUNCTYPE(c_int, c_char_p, c_mode_t,
POINTER(fuse_file_info))),
('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t,
POINTER(fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
POINTER(fuse_file_info))),
('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info),
c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong))),
]
def time_of_timespec(ts):
return ts.tv_sec + ts.tv_nsec / 10 ** 9
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
timespec = getattr(st, key + 'spec')
timespec.tv_sec = int(val)
timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
elif hasattr(st, key):
setattr(st, key, val)
def fuse_get_context():
'Returns a (uid, gid, pid) tuple'
ctxp = _libfuse.fuse_get_context()
ctx = ctxp.contents
return ctx.uid, ctx.gid, ctx.pid
class FuseOSError(OSError):
def __init__(self, errno):
super(FuseOSError, self).__init__(errno, strerror(errno))
class FUSE(object):
'''
This class is the lower level interface and should not be subclassed under
normal use. Its methods are called by fuse.
Assumes API version 2.6 or later.
'''
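# Usage sketch (illustrative only; MyOperations is a hypothetical subclass):
#   FUSE(MyOperations(), '/mnt/point', foreground=True, nothreads=True)
# The OPTIONS tuple below maps those keyword arguments onto the -f/-d/-s
# fuse flags; any other kwargs are passed through as -o key=value options.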
OPTIONS = (
('foreground', '-f'),
('debug', '-d'),
('nothreads', '-s'),
)
def __init__(self, operations, mountpoint, raw_fi=False, encoding='utf-8',
**kwargs):
'''
Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc.
'''
self.operations = operations
self.raw_fi = raw_fi
self.encoding = encoding
args = ['fuse']
args.extend(flag for arg, flag in self.OPTIONS
if kwargs.pop(arg, False))
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(self._normalize_fuse_options(**kwargs)))
args.append(mountpoint)
args = [arg.encode(encoding) for arg in args]
argv = (c_char_p * len(args))(*args)
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
op = partial(self._wrapper, getattr(self, name))
setattr(fuse_ops, name, prototype(op))
try:
old_handler = signal(SIGINT, SIG_DFL)
except ValueError:
old_handler = SIG_DFL
err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
try:
signal(SIGINT, old_handler)
except ValueError:
pass
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
@staticmethod
def _normalize_fuse_options(**kargs):
for key, value in kargs.items():
if isinstance(value, bool):
if value is True: yield key
else:
yield '%s=%s' % (key, value)
@staticmethod
def _wrapper(func, *args, **kwargs):
'Decorator for the methods that follow'
try:
return func(*args, **kwargs) or 0
except OSError, e:
return -(e.errno or EFAULT)
except:
print_exc()
return -EFAULT
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path.decode(self.encoding)) \
.encode(self.encoding)
# copies a string into the given buffer
# (null terminated and truncated if necessary)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
def mknod(self, path, mode, dev):
return self.operations('mknod', path.decode(self.encoding), mode, dev)
def mkdir(self, path, mode):
return self.operations('mkdir', path.decode(self.encoding), mode)
def unlink(self, path):
return self.operations('unlink', path.decode(self.encoding))
def rmdir(self, path):
return self.operations('rmdir', path.decode(self.encoding))
def symlink(self, source, target):
'creates a symlink `target -> source` (e.g. ln -s source target)'
return self.operations('symlink', target.decode(self.encoding),
source.decode(self.encoding))
def rename(self, old, new):
return self.operations('rename', old.decode(self.encoding),
new.decode(self.encoding))
def link(self, source, target):
'creates a hard link `target -> source` (e.g. ln source target)'
return self.operations('link', target.decode(self.encoding),
source.decode(self.encoding))
def chmod(self, path, mode):
return self.operations('chmod', path.decode(self.encoding), mode)
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
uid = -1
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path.decode(self.encoding), uid, gid)
def truncate(self, path, length):
return self.operations('truncate', path.decode(self.encoding), length)
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('open', path.decode(self.encoding), fi)
else:
fi.fh = self.operations('open', path.decode(self.encoding),
fi.flags)
return 0
def read(self, path, buf, size, offset, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
ret = self.operations('read', path.decode(self.encoding), size,
offset, fh)
if not ret: return 0
retsize = len(ret)
assert retsize <= size, \
'actual amount read %d greater than expected %d' % (retsize, size)
data = create_string_buffer(ret, retsize)
memmove(buf, ret, retsize)
return retsize
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('write', path.decode(self.encoding), data,
offset, fh)
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path.decode(self.encoding))
for key, val in attrs.items():
if hasattr(stv, key):
setattr(stv, key, val)
return 0
def flush(self, path, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('flush', path.decode(self.encoding), fh)
def release(self, path, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('release', path.decode(self.encoding), fh)
def fsync(self, path, datasync, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('fsync', path.decode(self.encoding), datasync,
fh)
def setxattr(self, path, name, value, size, options, *args):
return self.operations('setxattr', path.decode(self.encoding),
name.decode(self.encoding),
string_at(value, size), options, *args)
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path.decode(self.encoding),
name.decode(self.encoding), *args)
retsize = len(ret)
# allow size queries
if not value: return retsize
# do not truncate
if retsize > size: return -ERANGE
buf = create_string_buffer(ret, retsize) # Does not add trailing 0
memmove(value, buf, retsize)
return retsize
def listxattr(self, path, namebuf, size):
attrs = self.operations('listxattr', path.decode(self.encoding)) or ''
ret = '\x00'.join(attrs).encode(self.encoding) + '\x00'
retsize = len(ret)
# allow size queries
if not namebuf: return retsize
# do not truncate
if retsize > size: return -ERANGE
buf = create_string_buffer(ret, retsize)
memmove(namebuf, buf, retsize)
return retsize
def removexattr(self, path, name):
return self.operations('removexattr', path.decode(self.encoding),
name.decode(self.encoding))
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir',
path.decode(self.encoding))
return 0
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path.decode(self.encoding),
fip.contents.fh):
if isinstance(item, basestring):
name, st, offset = item, None, 0
else:
name, attrs, offset = item
if attrs:
st = c_stat()
set_st_attrs(st, attrs)
else:
st = None
if filler(buf, name.encode(self.encoding), st, offset) != 0:
break
return 0
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path.decode(self.encoding),
fip.contents.fh)
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path.decode(self.encoding),
datasync, fip.contents.fh)
def init(self, conn):
return self.operations('init', '/')
def destroy(self, private_data):
return self.operations('destroy', '/')
def access(self, path, amode):
return self.operations('access', path.decode(self.encoding), amode)
def create(self, path, mode, fip):
fi = fip.contents
path = path.decode(self.encoding)
if self.raw_fi:
return self.operations('create', path, mode, fi)
else:
fi.fh = self.operations('create', path, mode)
return 0
def ftruncate(self, path, length, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('truncate', path.decode(self.encoding),
length, fh)
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
if not fip:
fh = fip
elif self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
attrs = self.operations('getattr', path.decode(self.encoding), fh)
set_st_attrs(st, attrs)
return 0
def lock(self, path, fip, cmd, lock):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('lock', path.decode(self.encoding), fh, cmd,
lock)
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
mtime = time_of_timespec(buf.contents.modtime)
times = (atime, mtime)
else:
times = None
return self.operations('utimens', path.decode(self.encoding), times)
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path.decode(self.encoding), blocksize,
idx)
class Operations(object):
'''
This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception on
error.
When in doubt of what an operation should do, check the FUSE header file
or the corresponding system call man page.
'''
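# Minimal subclass sketch (illustrative, not part of the original module):
#   class HelloFS(Operations):
#       def getattr(self, path, fh=None):
#           if path == '/':
#               return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
#           raise FuseOSError(ENOENT)
#       def readdir(self, path, fh):
#           return ['.', '..']
# Anything not overridden falls back to the read-only defaults below.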
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
def access(self, path, amode):
return 0
bmap = None
def chmod(self, path, mode):
raise FuseOSError(EROFS)
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
def create(self, path, mode, fi=None):
'''
When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0.
'''
raise FuseOSError(EROFS)
def destroy(self, path):
'Called on filesystem destruction. Path is always /'
pass
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def getattr(self, path, fh=None):
'''
Returns a dictionary with keys identical to the stat C structure of
stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incompatibility between Linux and Mac OS X
concerning st_nlink of directories. Mac OS X counts all files inside
the directory, while Linux counts only the subdirectories.
'''
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
def init(self, path):
'''
Called on filesystem initialization. (Path is always /)
Use it instead of __init__ if you start threads on initialization.
'''
pass
def link(self, target, source):
'creates a hard link `target -> source` (e.g. ln source target)'
raise FuseOSError(EROFS)
def listxattr(self, path):
return []
lock = None
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
def open(self, path, flags):
'''
When raw_fi is False (default case), open should return a numerical
file handle.
When raw_fi is True the signature of open becomes:
open(self, path, fi)
and the file handle should be set directly.
'''
return 0
def opendir(self, path):
'Returns a numerical file handle.'
return 0
def read(self, path, size, offset, fh):
'Returns a string containing the data requested.'
raise FuseOSError(EIO)
def readdir(self, path, fh):
'''
Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr.
'''
return ['.', '..']
def readlink(self, path):
raise FuseOSError(ENOENT)
def release(self, path, fh):
return 0
def releasedir(self, path, fh):
return 0
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
def rename(self, old, new):
raise FuseOSError(EROFS)
def rmdir(self, path):
raise FuseOSError(EROFS)
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
def statfs(self, path):
'''
Returns a dictionary with keys identical to the statvfs C structure of
statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2
(minimum 512).
'''
return {}
def symlink(self, target, source):
'creates a symlink `target -> source` (e.g. ln -s source target)'
raise FuseOSError(EROFS)
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
def unlink(self, path):
raise FuseOSError(EROFS)
def utimens(self, path, times=None):
'Times is a (atime, mtime) tuple. If None use current time.'
return 0
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
class LoggingMixIn:
log = logging.getLogger('fuse.log-mixin')
def __call__(self, op, path, *args):
self.log.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError, e:
ret = str(e)
raise
finally:
self.log.debug('<- %s %s', op, repr(ret))
| mit | 7,666,557,376,124,482,000 | 30.324053 | 78 | 0.539107 | false |
ytaben/cyphesis | rulesets/basic/mind/NPCMind.py | 1 | 38678 |
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 1999 Aloril (See the file COPYING for details).
import random
import traceback
from atlas import *
from physics import *
from physics import Quaternion
from common import const
from types import *
from physics import Vector3D
import server
from mind.Memory import Memory
from mind.Knowledge import Knowledge
from mind.panlingua import interlinguish,ontology
from mind.compass import vector_to_compass
from common import log,const
import dictlist
import mind.goals
import mind.goals.common
reverse_cmp={'>':'<'}
def get_dict_func(self, func_str, func_undefined):
"""get method by name from instance or return default handler"""
try:
func=getattr(self,func_str)
except AttributeError:
func=func_undefined
return func
class NPCMind(server.Mind):
"""Mind class for most mobile entities in the game.
An NPCMind object is associated with all NPC and similar entities on a
game server. It handles perception data from the world, tracks what
the NPC knows about, and handles its goals.
The data is organized into three key data structures:
self.map is handled by the underlying C++ code, and contains a copy of
all the entities in the world that this NPC is currently able to perceive.
self.knowledge contains data triples which define relations between
entities.
self.goals and self.trigger_goals contain trees of goals which represent
current and potential activities that NPC might engage in. self.goals are
goals which are checked each tick, self.trigger_goals are goals which
are activated by an event."""
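# Illustrative examples of the structures described above (hypothetical
# values, not part of the original file):
#   self.add_knowledge('price', 'loaf', 5)            # triple: price of loaf is 5
#   self.add_knowledge('location', 'forest', a_loc)   # a_loc: a physics Location
#   self.add_goal("common.misc_goal.transaction('loaf', '27', 5)")
# Goal strings are evaluated against mind.goals by create_goal() further down.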
########## Initialization
def __init__(self, cppthing):
self.mind = cppthing
self.knowledge=Knowledge()
self.mem=Memory(map=self.map)
self.things={}
self.pending_things=[]
self._reverse_knowledge()
self.goals=[]
self.money_transfers=[]
self.transfers=[]
self.trigger_goals={}
self.jitter=random.uniform(-0.1, 0.1)
#???self.debug=debug(self.name+".mind.log")
self.message_queue=None
#This is going to be really tricky
self.map.add_hooks_append("add_map")
self.map.update_hooks_append("update_map")
self.map.delete_hooks_append("delete_map")
self.goal_id_counter=0
def find_op_method(self, op_id, prefix="",undefined_op_method=None):
"""find right operation to invoke"""
if not undefined_op_method: undefined_op_method=self.undefined_op_method
return get_dict_func(self, prefix+op_id+"_operation",undefined_op_method)
def undefined_op_method(self, op):
"""this operation is used when no other matching operation is found"""
pass
def get_op_name_and_sub(self, op):
event_name = op.id
sub_op = op
# I am not quite sure why this is while, as it's only ever true
# for one iteration.
while len(sub_op) and sub_op[0].get_name()=="op":
sub_op = sub_op[0]
event_name = event_name + "_" + sub_op.id
return event_name, sub_op
def is_talk_op_addressed_to_me_or_none(self, op):
"""Checks whether a Talk op is addressed either to none or to me.
This is useful if we want to avoid replying to queries addressed
to other entities."""
talk_entity=op[0]
if hasattr(talk_entity, "address"):
addressElement = talk_entity.address
if len(addressElement) == 0:
return True
return self.id in addressElement
return True
########## Map updates
def add_map(self, obj):
"""Hook called by underlying map code when an entity is added."""
#print "Map add",obj
pass
def update_map(self, obj):
"""Hook called by underlying map code when an entity is updated.
Fix ownership category for objects owned temporarily under 'Foo' type."""
#print "Map update",obj
foo_lst = self.things.get('Foo',[])
for foo in foo_lst[:]: #us copy in loop, because it might get modified
print "Oh MY GOD! We have a Foo thing!"
if foo.id==obj.id:
self.remove_thing(foo)
self.add_thing(obj)
def delete_map(self, obj):
"""Hook called by underlying map code when an entity is deleted."""
#print "Map delete",obj
self.remove_thing(obj)
########## Operations
def setup_operation(self, op):
"""called once by world after object has been made
send first tick operation to object
This method is automatically invoked by the C++ BaseMind code, due to its *_operation name."""
#CHEAT!: add memory, etc... initialization (or some of it to __init__)
#Setup a tick operation for thinking
thinkTickOp = Operation("tick")
thinkTickOp.setArgs([Entity(name="think")])
#Setup a tick operation for periodical persistence of thoughts to the server
sendThoughtsTickOp = Operation("tick")
sendThoughtsTickOp.setArgs([Entity(name="persistthoughts")])
sendThoughtsTickOp.setFutureSeconds(5)
return Operation("look")+thinkTickOp+sendThoughtsTickOp
def tick_operation(self, op):
"""periodically reasses situation
This method is automatically invoked by the C++ BaseMind code, due to its *_operation name.
"""
args=op.getArgs()
if len(args) != 0:
if args[0].name == "think":
#It's a "thinking" op, which is the base of the AI behaviour.
#At regular intervals the AI needs to assess its goals; this is one through "thinkning" ops.
opTick=Operation("tick")
#just copy the args from the previous tick
opTick.setArgs(args)
opTick.setFutureSeconds(const.basic_tick + self.jitter)
for t in self.pending_things:
thing = self.map.get(t)
if thing and thing.type[0]:
self.add_thing(thing)
self.pending_things=[]
result=self.think()
if self.message_queue:
result = self.message_queue + result
self.message_queue = None
return opTick+result
elif args[0].name == "persistthoughts":
#It's a periodic tick for sending thoughts to the server (so that they can be persisted)
#TODO: only send thoughts when they have changed.
opTick=Operation("tick")
#just copy the args from the previous tick
opTick.setArgs(args)
#Persist the thoughts to the server at 30 second intervals.
opTick.setFutureSeconds(30)
result = self.commune_all_thoughts(op, "persistthoughts")
return opTick+result
def unseen_operation(self, op):
"""This method is automatically invoked by the C++ BaseMind code, due to its *_operation name."""
if len(op) > 0:
obsolete_id = op[0].id
if obsolete_id:
self.map.delete(obsolete_id)
########## Sight operations
def sight_create_operation(self, op):
"""Note our ownership of entities we created.
This method is automatically invoked by the C++ BaseMind code, due to its *_*_operation name."""
#BaseMind version overridden!
obj=self.map.add(op[0], op.getSeconds())
if op.to==self.id:
self.add_thing(obj)
def sight_move_operation(self, op):
"""change position in our local map
This method is automatically invoked by the C++ BaseMind code, due to its *_*_operation name."""
obj=self.map.update(op[0], op.getSeconds())
if obj.location.parent.id==self.id:
self.add_thing(obj)
if op.to != self.id:
self.transfers.append((op.from_, obj.id))
if obj.type[0]=="coin" and op.from_ != self.id:
self.money_transfers.append([op.from_, 1])
return Operation("imaginary", Entity(description="accepts"))
def think_get_operation(self, op):
"""A Think op wrapping a Get op is used to inquire about the status of a mind.
It's often sent from authoring clients, as well as the server itself when
it wants to persist the thoughts of a mind.
A Get op without any args means that the mind should dump all its thoughts.
If there are args however, the meaning of what's to return differs depending on the
args.
* If "goal" is specified, a "think" operation only pertaining to goals is returned. The
"goal" arg should be a map, where the keys and values are used to specify exactly what goals
to return. An empty map returns all goals.
This method is automatically invoked by the C++ BaseMind code, due to its *_*_operation name."""
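# Protocol sketch (illustrative): a client asking only about goals sends
# roughly Think(Get(Entity(goal={}))) and receives the Think(Set(...))
# built by commune_goals(); a bare Think(Get()) falls through to
# commune_all_thoughts() and dumps knowledge, goals and owned things.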
args=op.getArgs()
#If there are no args we should send all of our thoughts
if len(args) == 0:
return self.commune_all_thoughts(op, None)
else:
argEntity=args[0]
if hasattr(argEntity, "goal"):
goal_entity = argEntity.goal
return self.commune_goals(op, goal_entity)
#TODO: allow for finer grained query of specific thoughts
def commune_goals(self, op, goal_entity):
"""Sends back information about goals only."""
thinkOp = Operation("think")
setOp = Operation("set")
thoughts = []
#It's important that the order of the goals is retained
for goal in self.goals:
goalString = ""
if hasattr(goal, "str"):
goalString = goal.str
else:
goalString = goal.__class__.__name__
thoughts.append(Entity(goal=goalString, id=goalString))
for (trigger, goallist) in sorted(self.trigger_goals.items()):
for goal in goallist:
goalString = ""
if hasattr(goal, "str"):
goalString = goal.str
else:
goalString = goal.__class__.__name__
thoughts.append(Entity(goal=goalString, id=goalString))
setOp.setArgs(thoughts)
thinkOp.setArgs([setOp])
thinkOp.setRefno(op.getSerialno())
res = Oplist()
res = res + thinkOp
return res
def find_goal(self, definition):
"""Searches for a goal, with the specified id"""
#Goals are either stored in "self.goals" or "self.trigger_goals", so we need
#to check both
for goal in self.goals:
if goal.str == definition:
return goal
for (trigger, goallist) in sorted(self.trigger_goals.items()):
for goal in goallist:
if goal.str == definition:
return goal
return None
def think_look_operation(self, op):
"""Sends back information about goals. This is mainly to be used for debugging minds.
If no arguments are specified all goals will be reported, else a match will be done
using 'id'.
The information will be sent back as a Think operation wrapping an Info operation.
This method is automatically invoked by the C++ BaseMind code, due to its *_*_operation name.
"""
thinkOp = Operation("think")
goalInfoOp = Operation("info")
goal_infos = []
if not op.getArgs():
#get all goals
for goal in self.goals:
goal_infos.append(Entity(id=goal.str, report=goal.report()))
for (trigger, goallist) in sorted(self.trigger_goals.items()):
for goal in goallist:
goal_infos.append(Entity(id=goal.str, report=goal.report()))
else:
for arg in op.getArgs():
goal = self.find_goal(arg.id)
if goal:
goal_infos.append(Entity(id=goal.str, report=goal.report()))
goalInfoOp.setArgs(goal_infos)
thinkOp.setRefno(op.getSerialno())
thinkOp.setArgs([goalInfoOp])
res = Oplist()
res = res + thinkOp
return res
def commune_all_thoughts(self, op, name):
"""Sends back information on all thoughts. This includes knowledge and goals,
as well as known things.
The thoughts will be sent back as a "think" operation, wrapping a Set operation, in a manner such that if the
same think operation is sent back to the mind all thoughts will be restored. In
this way the mind can support server side persistence of its thoughts.
A name can optionally be supplied, which will be set on the Set operation.
"""
thinkOp = Operation("think")
setOp = Operation("set")
thoughts = []
for attr in sorted(dir(self.knowledge)):
d=getattr(self.knowledge, attr)
if getattr(d, '__iter__', False):
for key in sorted(d):
if attr!="goal":
objectVal=d[key]
if type(objectVal) is Location:
#Serialize Location as tuple, with parent if available
if (objectVal.parent is None):
location=objectVal.coordinates
else:
location=("$eid:" + objectVal.parent.id,objectVal.coordinates)
object=str(location)
else:
object=str(d[key])
thoughts.append(Entity(predicate=attr, subject=str(key), object=object))
#It's important that the order of the goals is retained
for goal in self.goals:
if hasattr(goal, "str"):
thoughts.append(Entity(goal=goal.str, id=goal.str))
for (trigger, goallist) in sorted(self.trigger_goals.items()):
for goal in goallist:
if hasattr(goal, "str"):
thoughts.append(Entity(goal=goal.str, id=goal.str))
if len(self.things) > 0:
things={}
for (id, thinglist) in sorted(self.things.items()):
idlist=[]
for thing in thinglist:
idlist.append(thing.id)
things[id] = idlist
thoughts.append(Entity(things=things))
if len(self.pending_things) > 0:
thoughts.append(Entity(pending_things=self.pending_things))
setOp.setArgs(thoughts)
thinkOp.setArgs([setOp])
if not op.isDefaultSerialno():
thinkOp.setRefno(op.getSerialno())
if name:
setOp.setName(name)
res = Oplist()
res = res + thinkOp
return res
def think_delete_operation(self, op):
"""Deletes a thought, or all thoughts if no argument is specified.
This method is automatically invoked by the C++ BaseMind code, due to its *_*_operation name."""
if not op.getArgs():
self.goals = []
self.trigger_goals = {}
else:
args=op.getArgs()
for thought in args:
for goal in self.goals:
if goal.str == thought.id:
self.goals.remove(goal)
return
for (trigger, goallist) in sorted(self.trigger_goals.items()):
for goal in goallist:
if goal.str == thought.id:
goallist.remove(goal)
return
def think_set_operation(self, op):
"""Sets a new thought, or updates an existing one
This method is automatically invoked by the C++ BaseMind code, due to its *_*_operation name."""
#If the Set op has the name "persistthoughts" it's a Set op sent to ourselves meant for the server
#(so it can persist the thoughts in the database). We should ignore it.
if op.getName() == "persistthoughts":
return
args=op.getArgs()
for thought in args:
#Check if there's a 'predicate' set; if so handle it as knowledge.
#Else check if it's things that we know we own or ought to own.
if hasattr(thought, "predicate") == False:
if hasattr(thought, "things"):
things=thought.things
for (id, thinglist) in things.items():
#We can't iterate directly over the list, as it's of type atlas.Message; we must first "pythonize" it.
#This should be reworked into a better way.
thinglist=thinglist.pythonize()
for thingId in thinglist:
thingId=str(thingId)
thing = self.map.get(thingId)
if thing and thing.type[0]:
self.add_thing(thing)
else:
self.pending_things.append(thingId)
elif hasattr(thought, "pending_things"):
for id in thought.pending_things:
self.pending_things.append(str(id))
elif hasattr(thought, "goal"):
goalString=str(thought.goal)
if hasattr(thought, "id"):
id=str(thought.id)
goal = self.find_goal(id)
if goal:
self.update_goal(goal, goalString)
else:
self.add_goal(goalString)
else:
self.add_goal(goalString)
else:
subject=thought.subject
predicate=thought.predicate
object=thought.object
#Handle locations.
if len(object) > 0 and object[0]=='(':
#CHEAT!: remove eval
locdata=eval(object)
#If only coords are supplied, it's handled as a location within the same parent space as ourselves
if (len(locdata) == 3):
loc=self.location.copy()
loc.coordinates=Vector3D(list(locdata))
elif (len(locdata) == 2):
entity_id_string = locdata[0]
#A prefix of "$eid:" denotes an entity id; it should be stripped first.
if entity_id_string.startswith("$eid:"):
entity_id_string = entity_id_string[5:]
where=self.map.get_add(entity_id_string)
coords=Point3D(list(locdata[1]))
loc=Location(where, coords)
self.add_knowledge(predicate,subject,loc)
else:
self.add_knowledge(predicate,subject,object)
########## Talk operations
def admin_sound(self, op):
assert(op.from_ == op.to)
return op.from_ == self.id
def interlinguish_warning(self, op, say, msg):
log.debug(1,str(self.id)+" interlinguish_warning: "+str(msg)+\
": "+str(say[0].lexlink.id[1:]),op)
def interlinguish_desire_verb3_buy_verb1_operation(self, op, say):
"""Handle a sentence of the form 'I would like to buy a ....'
Check if we have any of the type of thing the other character is
interested in, and whether we know what price to sell at. If so
set up the transaction goal, which offers to sell it."""
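# Example flow (hypothetical ids/prices): hearing "I would like to buy a loaf"
# from entity '27', with a 'loaf' in self.things and price knowledge of 5,
# inserts transaction('loaf', '27', 5) and replies
# "<buyer name> one loaf will be 5 coins" while turning to face the buyer.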
object=say[1].word
thing=self.things.get(object)
if thing:
price=self.get_knowledge("price", object)
if not price:
return
goal=mind.goals.common.misc_goal.transaction(object, op.to, price)
who=self.map.get(op.to)
self.goals.insert(0,goal)
return Operation("talk", Entity(say=self.thing_name(who)+" one "+object+" will be "+str(price)+" coins")) + self.face(who)
def interlinguish_desire_verb3_operation(self, op, say):
"""Handle a sentence of the form 'I would like to ...'"""
object=say[2:]
verb=interlinguish.get_verb(object)
operation_method=self.find_op_method(verb,"interlinguish_desire_verb3_",
self.interlinguish_undefined_operation)
res = Oplist()
res = res + self.call_interlinguish_triggers(verb, "interlinguish_desire_verb3_", op, object)
res = res + operation_method(op, object)
return res
def interlinguish_be_verb1_operation(self, op, say):
"""Handle sentences of the form '... is more important that ...'
Accept instructions about the priority of goals relative to each
based on key verbs associated with those goals."""
if not self.admin_sound(op):
return self.interlinguish_warning(op,say,"You are not admin")
res=interlinguish.match_importance(say)
if res:
return self.add_importance(res['sub'].id,'>',res['obj'].id)
else:
return self.interlinguish_warning(op,say,"Unknown assertion")
def interlinguish_know_verb1_operation(self, op, say):
"""Handle a sentence of the form 'know subject predicate object'
Accept admin instruction about knowledge, and store the triple
in our knowledge base."""
if not self.admin_sound(op):
return self.interlinguish_warning(op,say,"You are not admin")
subject=say[1].word
predicate=say[2].word
object=say[3].word
## print "know:",subject,predicate,object
if object[0]=='(':
#CHEAT!: remove eval
xyz=list(eval(object))
loc=self.location.copy()
loc.coordinates=Vector3D(xyz)
self.add_knowledge(predicate,subject,loc)
else:
self.add_knowledge(predicate,subject,object)
def interlinguish_tell_verb1_operation(self, op, say):
"""Handle a sentence of the form 'Tell (me) ....'
Accept queries about what we know. Mostly this is for debugging
and for the time being it is useful to answer these queries no matter
who asks."""
# Ignore messages addressed to others
if not self.is_talk_op_addressed_to_me_or_none(op):
return None
# Currently no checking for trust here.
# We are being liberal with interpretation of "subject" and "object"
subject=say[1].word
predicate=say[2].word
object=say[3].word
k=self.get_knowledge(predicate, object)
if k==None: pass
# return Operation('talk',Entity(say="I know nothing about the "+predicate+" of "+object))
else:
k_type = type(k)
if k_type==type(Location()):
dist = distance_to(self.location, k)
dist.z = 0
distmag = dist.mag()
if distmag < 8:
k = 'right here'
else:
# Currently this assumes dist is relative to TLVE
k='%f metres %s' % (distmag, vector_to_compass(dist))
elif k_type!=StringType:
k='difficult to explain'
elif predicate=='about':
return self.face_and_address(op.to, k)
return self.face_and_address(op.to, "The " + predicate + " of " +
object + " is " + k)
def interlinguish_list_verb1_operation(self, op, say):
"""Handle a sentence of the form 'List (me) ....'
Accept queries about what we know. Mostly this is for debugging
and for the time being it is useful to answer these queries no matter
who asks.
Querying for "all knowledge" will list all knowledge.
"""
# Ignore messages addressed to others
if not self.is_talk_op_addressed_to_me_or_none(op):
return None
# Currently no checking for trust here.
# We are being liberal with interpretation of "subject" and "object"
subject=say[1].word
predicate=say[2].word
if predicate == 'all knowledge':
res = Oplist()
res = res + self.face(self.map.get(op.to))
for attr in dir(self.knowledge):
d=getattr(self.knowledge, attr)
if getattr(d, '__iter__', False):
for key in d:
#print attr + " of "+key+": " +str(d[key])
res = res + self.address(op.to, "The " + attr + " of " +
key + " is " + str(d[key]))
return res
else:
if not hasattr(self.knowledge, predicate):
return None
d=getattr(self.knowledge, predicate)
res = Oplist()
res = res + self.face(self.map.get(op.to))
for key in d:
res = res + self.address(op.to, "The " + predicate + " of " +
key + " is " + str(d[key]))
return res
def interlinguish_own_verb1_operation(self, op, say):
"""Handle a sentence of the form ' own ...'
Sentences of this form from the admin inform us that we own an
entity. This is essential when an entity needs to be used as a
tool, or raw material."""
if not self.admin_sound(op):
return self.interlinguish_warning(op,say,"You are not admin")
## print self,"own:",say[1].word,say[2].word
subject=self.map.get_add(say[1].word)
## print "subject found:",subject
object=self.map.get_add(say[2].word)
## print "object found:",object
## if subject.id==self.id:
## foo
if subject.id==self.id:
self.add_thing(object)
def interlinguish_undefined_operation(self, op, say):
#CHEAT!: any way to handle these?
log.debug(2,str(self.id)+" interlinguish_undefined_operation:",op)
log.debug(2,str(say))
########## Sound operations
def sound_talk_operation(self, op):
"""Handle the sound of a talk operation from another character.
The spoken sentence comes in as a sentence string, which
is converted into a structure representation by the interlinguish
code. Embedded in the structure is the interlinguish string which
is then used to call methods and activate triggers, such as
dynamic goals."""
talk_entity=op[0]
if interlinguish.convert_english_to_interlinguish(self, talk_entity):
say=talk_entity.interlinguish
verb=interlinguish.get_verb(say)
operation_method=self.find_op_method(verb,"interlinguish_",
self.interlinguish_undefined_operation)
res = self.call_interlinguish_triggers(verb, "interlinguish_", op, say)
res2 = operation_method(op,say)
if res:
res += res2
else:
res = res2
return res
########## Other operations
def call_interlinguish_triggers(self, verb, prefix, op, say):
"""Call trigger goals that have registered a trigger string that
matches the current interlinguish string.
Given an interlinguish verb string, and a prefix, find any trigger
goals that should be activated by the combined trigger string, and
activate them."""
# FIXME Don't need this call to get_op_name_and_sub, as we don't use
# the result.
null_name, sub_op = self.get_op_name_and_sub(op)
event_name = prefix+verb
reply = Oplist()
for goal in self.trigger_goals.get(event_name,[]):
reply += goal.event(self, op, say)
return reply
def call_triggers_operation(self, op):
event_name, sub_op = self.get_op_name_and_sub(op)
reply = Oplist()
for goal in self.trigger_goals.get(event_name,[]):
reply += goal.event(self, op, sub_op)
return reply
########## Generic knowledge
def _reverse_knowledge(self):
"""normally location: tell where items reside
reverse location tells what resides in this spot"""
self.reverse_knowledge=Knowledge()
for (k,v) in self.knowledge.location.items():
if not self.reverse_knowledge.location.get(v):
self.reverse_knowledge.add("location",v,k)
def get_reverse_knowledge(self, what, key):
"""get certain reverse knowledge value
what: what kind of knowledge (location only so far)"""
d=getattr(self.reverse_knowledge,what)
return d.get(key)
def get_knowledge(self, what, key):
"""get certain knowledge value
what: what kind of knowledge (see Knowledge.py for list)"""
if not hasattr(self.knowledge, what):
return None
d=getattr(self.knowledge,what)
return d.get(key)
def add_knowledge(self,what,key,value):
"""add certain type of knowledge"""
self.knowledge.add(what,key,value)
#forward thought
if type(value)==InstanceType:
if what=="goal":
thought_value = value.info()
else:
thought_value = `value`
else:
thought_value = value
desc="%s knowledge about %s is %s" % (what,key,thought_value)
# ent = Entity(description=desc, what=what, key=key, value=thought_value)
# self.send(Operation("thought",ent))
if what=="location":
#and reverse too
self.reverse_knowledge.add("location",value,key)
def remove_knowledge(self,what,key):
"""remove certain type of knowledge"""
self.knowledge.remove(what,key)
########## Importance: Knowledge about how things compare in urgency, etc..
def add_importance(self, sub, cmp, obj):
"""add importance: both a>b and b<a"""
self.add_knowledge('importance',(sub,obj),cmp)
self.add_knowledge('importance',(obj,sub),reverse_cmp[cmp])
def cmp_goal_importance(self, g1, g2):
"""which of goals is more important?
also handle more generic ones:
for example if you are comparing breakfast to sleeping
it will note that having breakfast is a (isa) type of eating"""
try:
id1=g1.key[1]
id2=g2.key[1]
except AttributeError:
return 1
l1=ontology.get_isa(id1)
l2=ontology.get_isa(id2)
for s1 in l1:
for s2 in l2:
cmp=self.knowledge.importance.get((s1.id,s2.id))
if cmp:
return cmp=='>'
return 1
########## things we own
def thing_name(self,thing):
if hasattr(thing, 'name'):
return thing.name
return thing.type[0]
########## things we own
def add_thing(self,thing):
"""I own this thing"""
#CHEAT!: this feature not yet supported
## if not thing.location:
## thing.location=self.get_knowledge("location",thing.place)
log.debug(3,str(self)+" "+str(thing)+" before add_thing: "+str(self.things))
        #thought about owning this thing
name = self.thing_name(thing)
if not name:
self.pending_things.append(thing.id)
return
# desc="I own %s." % name
# what=thing.as_entity()
# ent = Entity(description=desc, what=what)
# self.send(Operation("thought",ent))
dictlist.add_value(self.things,name,thing)
log.debug(3,"\tafter: "+str(self.things))
def find_thing(self, thing):
if StringType==type(thing):
#return found list or empty list
return self.things.get(thing,[])
found=[]
for t in self.things.get(self.thing_name(thing),[]):
if t==thing: found.append(t)
return found
def remove_thing(self, thing):
"""I don't own this anymore (it may not exist)"""
dictlist.remove_value(self.things, thing)
########## goals
def add_goal(self, str_goal):
"""add goal..."""
try:
goal = self.create_goal(str_goal)
except BaseException as e:
print("Error when adding goal: " + str(e))
return
self.insert_goal(goal)
return goal
def insert_goal(self, goal, id=None):
if not id:
self.goal_id_counter = self.goal_id_counter + 1
id=str(self.goal_id_counter)
goal.id = id
if hasattr(goal,"trigger"):
dictlist.add_value(self.trigger_goals, goal.trigger(), goal)
return
for i in range(len(self.goals)-1,-1,-1):
if self.cmp_goal_importance(self.goals[i],goal):
self.goals.insert(i+1,goal)
return
self.goals.insert(0,goal)
def update_goal(self, goal, str_goal):
try:
new_goal = self.create_goal(goal.key, str_goal)
except BaseException as e:
print("Error when updating goal: " + str(e))
return
new_goal.id = goal.id
#We need to handle the case where a goal which had a trigger is replaced by one
#that hasn't, and the opposite
if hasattr(goal,"trigger"):
dictlist.remove_value(self.trigger_goals, goal)
self.insert_goal(new_goal, goal.id)
else:
if hasattr(new_goal,"trigger"):
self.goals.remove(goal)
self.insert_goal(new_goal, goal.id)
else:
index=self.goals.index(goal)
self.goals[index] = new_goal
def create_goal(self, str_goal):
#CHEAT!: remove eval (this and later)
goal=eval("mind.goals."+str_goal)
if const.debug_thinking:
goal.debug=1
goal.str=str_goal
return goal
def remove_goal(self, goal):
"""Removes a goal."""
if hasattr(goal,"trigger"):
dictlist.remove_value(self.trigger_goals, goal)
else:
self.goals.remove(goal)
def fulfill_goals(self,time):
"see if all goals are fulfilled: if not try to fulfill them"
for g in self.goals[:]:
if g.irrelevant:
self.goals.remove(g)
continue
#Don't process goals which have had three errors in them.
#The idea is to allow for some leeway in goal processing, but to punish repeat offenders.
if g.errors > 3:
continue
try:
res=g.check_goal(self,time)
if res: return res
except:
stacktrace=traceback.format_exc()
g.errors += 1
g.lastError=stacktrace
#If there's an error, print to the log, mark the goal, and continue with the next goal
#Some goals have a "str" attribute which represents the constructor; if so use that
if hasattr(g, "str"):
goalstring=g.str
else:
goalstring=g.__class__.__name__
print "Error in NPC with id " + self.id + " of type " + str(self.type) + " and name '" + self.name + "' when checking goal " + goalstring + "\n" + stacktrace
continue
# if res!=None: return res
def teach_children(self, child):
res=Oplist()
for k in self.knowledge.location.keys():
es=Entity(verb='know',subject=k,object=self.knowledge.location[k])
res.append(Operation('say',es,to=child))
for k in self.knowledge.place.keys():
es=Entity(verb='know',subject=k,object=self.knowledge.place[k])
res.append(Operation('say',es,to=child))
for g in self.goals:
es=Entity(verb='learn',subject=g.key,object=g.str)
res.append(Operation('say',es,to=child))
for im in self.knowledge.importance.keys():
cmp=self.knowledge.importance[im]
if cmp=='>':
s,i=il.importance(im[0],cmp,im[1])
es=Entity(say=s,interlinguish=i)
res.append(Operation('say',es,to=child))
return res
########## thinking (needs rewrite)
def think(self):
if const.debug_thinking:
log.thinking("think: "+str(self.id))
output=self.fulfill_goals(self.time)
# if output and const.debug_thinking:
# log.thinking(str(self)+" result at "+str(self.time)+": "+output[-1][0].description)
return output
########## communication: here send it locally
def send(self, op):
if not self.message_queue:
self.message_queue=Oplist(op)
else:
self.message_queue.append(op)
########## turn to face other entity
def face(self, other):
vector = distance_to(self.location, other.location)
vector.z = 0
if vector.square_mag() < 0.1:
return
vector = vector.unit_vector()
newloc = Location(self.location.parent)
newloc.orientation = Quaternion(Vector3D(1,0,0), vector)
return Operation("move", Entity(self.id, location=newloc))
def address(self, entity_id, message):
"""Creates a new Talk op which is addressed to an entity"""
return Operation('talk', Entity(say=message, address=[entity_id]))
def face_and_address(self, entity_id, message):
"""Utility method for generating ops for both letting the NPC face
as well as address another entity. In most cases this is what you
want to do when conversing."""
return self.address(entity_id, message) + \
self.face(self.map.get(entity_id))
| gpl-2.0 | 7,546,670,054,895,222,000 | 41.880266 | 173 | 0.562413 | false |
Freso/listenbrainz-server | listenbrainz_spark/stats/user/tests/test_release.py | 1 | 3436 | import json
import os
from collections import defaultdict
from datetime import datetime
import listenbrainz_spark.stats.user.release as release_stats
from listenbrainz_spark import utils
from listenbrainz_spark.path import LISTENBRAINZ_DATA_DIRECTORY
from listenbrainz_spark.tests import SparkTestCase
from pyspark.sql import Row
from pyspark.sql.types import (ArrayType, StringType, StructField,
StructType)
class releaseTestCase(SparkTestCase):
# use path_ as prefix for all paths in this class.
path_ = LISTENBRAINZ_DATA_DIRECTORY
def tearDown(self):
path_found = utils.path_exists(self.path_)
if path_found:
utils.delete_dir(self.path_, recursive=True)
def save_dataframe(self, filename):
now = datetime.now()
with open(self.path_to_data_file(filename)) as f:
data = json.load(f)
schema = StructType((StructField('user_name', StringType()), StructField('artist_name', StringType()),
StructField('artist_msid', StringType()), StructField('artist_mbids', ArrayType(StringType())),
StructField('release_name', StringType()), StructField('release_msid', StringType()),
StructField('release_mbid', StringType())))
df = None
for entry in data:
for idx in range(0, entry['count']):
                # Create one Row per listen, repeated entry['count'] times
row = utils.create_dataframe(Row(user_name=entry['user_name'], release_name=entry['release_name'],
release_msid=entry['release_msid'], release_mbid=entry['release_mbid'],
artist_name=entry['artist_name'], artist_msid=entry['artist_msid'],
artist_mbids=entry['artist_mbids']),
schema=schema)
df = df.union(row) if df else row
utils.save_parquet(df, os.path.join(self.path_, '{}/{}.parquet'.format(now.year, now.month)))
def test_get_releases(self):
self.save_dataframe('user_top_releases.json')
df = utils.get_listens(datetime.now(), datetime.now(), self.path_)
df.createOrReplaceTempView('test_view')
with open(self.path_to_data_file('user_top_releases.json')) as f:
data = json.load(f)
with open(self.path_to_data_file('user_top_releases_output.json')) as f:
expected = json.load(f)
data = release_stats.get_releases('test_view')
received = defaultdict(list)
for entry in data:
_dict = entry.asDict(recursive=True)
received[_dict['user_name']] = _dict['releases']
self.assertDictEqual(received, expected)
def test_get_releases_empty(self):
self.save_dataframe('user_top_releases_empty.json')
df = utils.get_listens(datetime.now(), datetime.now(), self.path_)
df.createOrReplaceTempView('test_view')
with open(self.path_to_data_file('user_top_releases.json')) as f:
data = json.load(f)
received = defaultdict(list)
data = release_stats.get_releases('test_view')
for entry in data:
_dict = entry.asDict(recursive=True)
received[_dict['user_name']] = _dict['releases']
self.assertDictEqual(received, {})
| gpl-2.0 | -1,975,790,899,982,015,000 | 41.95 | 124 | 0.599243 | false |
d120/pyophase | clothing/migrations/0006_auto_20160830_1443.py | 1 | 1098 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-30 12:43
from __future__ import unicode_literals
from django.db import migrations, models
from clothing.models import Size
def forwards_func(apps, schema_editor):
db_alias = schema_editor.connection.alias
# Set size_sortable for all exsisting sizes
for size in Size.objects.using(db_alias).all():
size.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('clothing', '0005_auto_20160829_1808'),
]
operations = [
migrations.AlterModelOptions(
name='size',
options={'ordering': ('size_sortable', 'size'), 'verbose_name': 'Größe', 'verbose_name_plural': 'Größen'},
),
migrations.AddField(
model_name='size',
name='size_sortable',
field=models.PositiveSmallIntegerField(default=0, help_text='Dieser Wert wird automatisch berechnet', verbose_name='Sortierbare Größe'),
),
migrations.RunPython(forwards_func, reverse_func),
]
| agpl-3.0 | -3,176,314,207,166,486,500 | 30.2 | 148 | 0.644689 | false |
eharney/cinder | cinder/api/contrib/cgsnapshots.py | 1 | 5734 | # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cgsnapshots api."""
from oslo_log import log as logging
import six
from six.moves import http_client
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import cgsnapshots as cgsnapshot_views
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
LOG = logging.getLogger(__name__)
class CgsnapshotsController(wsgi.Controller):
"""The cgsnapshots API controller for the OpenStack API."""
_view_builder_class = cgsnapshot_views.ViewBuilder
def __init__(self):
self.group_snapshot_api = group_api.API()
super(CgsnapshotsController, self).__init__()
def show(self, req, id):
"""Return data about the given cgsnapshot."""
LOG.debug('show called for member %s', id)
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
cgsnapshot = self._get_cgsnapshot(context, id)
return self._view_builder.detail(req, cgsnapshot)
def delete(self, req, id):
"""Delete a cgsnapshot."""
LOG.debug('delete called for member %s', id)
context = req.environ['cinder.context']
LOG.info('Delete cgsnapshot with id: %s', id)
try:
cgsnapshot = self._get_cgsnapshot(context, id)
self.group_snapshot_api.delete_group_snapshot(context, cgsnapshot)
except exception.GroupSnapshotNotFound:
# Not found exception will be handled at the wsgi level
raise
except exception.InvalidGroupSnapshot as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
except Exception:
msg = _("Failed cgsnapshot")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=http_client.ACCEPTED)
def index(self, req):
"""Returns a summary list of cgsnapshots."""
return self._get_cgsnapshots(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of cgsnapshots."""
return self._get_cgsnapshots(req, is_detail=True)
def _get_cg(self, context, id):
# Not found exception will be handled at the wsgi level
consistencygroup = self.group_snapshot_api.get(context, group_id=id)
return consistencygroup
def _get_cgsnapshot(self, context, id):
# Not found exception will be handled at the wsgi level
cgsnapshot = self.group_snapshot_api.get_group_snapshot(
context, group_snapshot_id=id)
return cgsnapshot
def _get_cgsnapshots(self, req, is_detail):
"""Returns a list of cgsnapshots, transformed through view builder."""
context = req.environ['cinder.context']
grp_snapshots = self.group_snapshot_api.get_all_group_snapshots(
context)
grpsnap_limited_list = common.limited(grp_snapshots, req)
if is_detail:
grp_snapshots = self._view_builder.detail_list(
req, grpsnap_limited_list)
else:
grp_snapshots = self._view_builder.summary_list(
req, grpsnap_limited_list)
return grp_snapshots
@wsgi.response(http_client.ACCEPTED)
def create(self, req, body):
"""Create a new cgsnapshot."""
LOG.debug('Creating new cgsnapshot %s', body)
self.assert_valid_body(body, 'cgsnapshot')
context = req.environ['cinder.context']
cgsnapshot = body['cgsnapshot']
self.validate_name_and_description(cgsnapshot)
try:
group_id = cgsnapshot['consistencygroup_id']
except KeyError:
msg = _("'consistencygroup_id' must be specified")
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
group = self._get_cg(context, group_id)
name = cgsnapshot.get('name', None)
description = cgsnapshot.get('description', None)
LOG.info("Creating cgsnapshot %(name)s.",
{'name': name},
context=context)
try:
new_cgsnapshot = self.group_snapshot_api.create_group_snapshot(
context, group, name, description)
# Not found exception will be handled at the wsgi level
except (exception.InvalidGroup,
exception.InvalidGroupSnapshot,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.msg)
retval = self._view_builder.summary(req, new_cgsnapshot)
return retval
class Cgsnapshots(extensions.ExtensionDescriptor):
"""cgsnapshots support."""
name = 'Cgsnapshots'
alias = 'cgsnapshots'
updated = '2014-08-18T00:00:00+00:00'
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
Cgsnapshots.alias, CgsnapshotsController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
| apache-2.0 | -6,161,191,167,094,759,000 | 33.751515 | 78 | 0.649634 | false |
cbib/taxotree | plottingValues.py | 1 | 3214 | import matplotlib.pyplot as plt
import numpy as np
#@pearson is the Pearson coefficient
def plotPearsonGraph(xArray,yArray,pearson,xLabel="X",yLabel="f(X)",maxx=10,minx=0,maxy=10,miny=0,title="Plotting of unknown function f"):
mini = min(minx,miny)
n = len(xArray)
if not (n == len(yArray)):
print "\n/!\ ERROR: Different lengths: xArray",n,"and yArray",len(yArray),"."
raise ValueError
plt.grid(True)
plt.title(title)
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.xlim(mini,maxx)
plt.ylim(mini,maxy)
t = np.zeros(n)
pearsont = np.zeros(n)
step = int(max(maxx,maxy)/n) + 1
currVal = 0
for i in range(n):
t[i] = currVal
pearsont[i] = currVal*pearson
currVal += step
#Lines will be in red for the main function, blue for pearson line
plt.plot(xArray,yArray,"ro",t,pearsont,"b--")
plt.show()
#Draws points
#len(xArray) == len(yArray)
#xArray and yArray are the array of values for the two variables
#xLabel and yLabel are the corresponding labels
def plotGraph(xArray,yArray,xLabel="X",yLabel="f(X)",maxx=10,minx=0,maxy=10,miny=0,title="Plotting of unknown function f"):
n = len(xArray)
if not (n == len(yArray)):
print "\n/!\ ERROR: Different lengths: xArray",n,"and yArray",len(yArray),"."
raise ValueError
plt.grid(True)
plt.title(title)
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.xlim(minx,maxx)
plt.ylim(miny,maxy)
#Lines will be in red
plt.plot(xArray,yArray,"ro")
plt.show()
#Draws histograms
def plotHist(xArray,xLabel="X",yLabel="f(X)",maxx=10,minx=0,maxy=10,miny=0,title="Histogram of unknown function f"):
#Green color
plt.hist(xArray,bins=50,normed=1,facecolor="g",alpha=0.5,label=xLabel)
plt.grid(True)
plt.title(title)
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.xlim(minx,maxx)
plt.ylim(miny,maxy)
plt.show()
#@labels is the array containing the labels of the pie chart (can go up to 14 different labels)
#@sizes is the arrays of parts of the pie chart owned by the different labels
def plotPieChart(labels,sizes,title):
initColors = ['gold','yellowgreen','lightcoral','lightskyblue','violet','blue','pink','red','orange','green','gray','brown','yellow','chartreuse','burlywood','cyan','magenta','white']
n = len(labels)
if not (n == len(sizes)):
print "\n/!\ ERROR: Different lengths ",len(labels),"and",len(sizes)
raise ValueError
if n > 18:
#Only displaying the most important elements
        array = sorted([(labels[i],sizes[i]) for i in range(n)],key=lambda x:x[1],reverse=True)[:18]
labels,sizes = [],[]
for x in array:
labels.append(x[0])
sizes.append(x[1])
n = len(sizes)
#explode maximum percentage
iMax = 0
maximum = 0
for i in range(n):
if maximum < sizes[i]:
iMax = i
maximum = sizes[i]
explode = [0] * n
explode[iMax] = 0.1
colors = initColors[:n]
plt.pie(sizes,explode=explode,labels=labels,colors=colors,autopct='%1.1f%%',shadow=True,startangle=140)
plt.axis('equal')
plt.title(title)
plt.show()
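# --- Illustrative usage (not part of the original module) ---
# Hedged sketch of how the plotting helpers above can be called; the sample
# data below is made up purely for demonstration.
if __name__ == "__main__":
    xs = np.linspace(0, 10, 50)
    ys = 0.8 * xs + np.random.normal(0, 0.5, 50)
    plotGraph(xs, ys, xLabel="X", yLabel="Y", maxx=10, maxy=10)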
| mit | -835,832,434,081,338,900 | 34.318681 | 187 | 0.635034 | false |
Guts/isogeo-api-py-minsdk | tests/test_search.py | 1 | 11978 | # -*- coding: UTF-8 -*-
#! python3
"""
Usage from the repo root folder:
.. code-block:: python
# for whole test
python -m unittest tests.test_search
# for specific
python -m unittest tests.test_search.TestSearch.test_search_search_as_application
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# Standard library
import logging
import unittest
import urllib3
from os import environ
from pathlib import Path
from random import randint, sample
from socket import gethostname
from sys import _getframe, exit
from time import gmtime, sleep, strftime
# 3rd party
from dotenv import load_dotenv
# module target
from isogeo_pysdk import Isogeo, Metadata, MetadataSearch
# #############################################################################
# ######## Globals #################
# ##################################
if Path("dev.env").exists():
load_dotenv("dev.env", override=True)
# host machine name - used as discriminator
hostname = gethostname()
# API access
METADATA_TEST_FIXTURE_UUID = environ.get("ISOGEO_FIXTURES_METADATA_COMPLETE")
WORKGROUP_TEST_FIXTURE_UUID = environ.get("ISOGEO_WORKGROUP_TEST_UUID")
# #############################################################################
# ########## Helpers ###############
# ##################################
def get_test_marker():
"""Returns the function name"""
return "TEST_PySDK - Search - {}".format(_getframe(1).f_code.co_name)
# #############################################################################
# ########## Classes ###############
# ##################################
class TestSearch(unittest.TestCase):
"""Test search methods."""
# -- Standard methods --------------------------------------------------------
@classmethod
def setUpClass(cls):
"""Executed when module is loaded before any test."""
# checks
if not environ.get("ISOGEO_API_GROUP_CLIENT_ID") or not environ.get(
"ISOGEO_API_GROUP_CLIENT_SECRET"
):
logging.critical("No API credentials set as env variables.")
exit()
else:
pass
# class vars and attributes
cls.li_fixtures_to_delete = []
# ignore warnings related to the QA self-signed cert
if environ.get("ISOGEO_PLATFORM").lower() == "qa":
urllib3.disable_warnings()
# API connection
cls.isogeo = Isogeo(
auth_mode="group",
client_id=environ.get("ISOGEO_API_GROUP_CLIENT_ID"),
client_secret=environ.get("ISOGEO_API_GROUP_CLIENT_SECRET"),
auto_refresh_url="{}/oauth/token".format(environ.get("ISOGEO_ID_URL")),
platform=environ.get("ISOGEO_PLATFORM", "qa"),
)
# getting a token
cls.isogeo.connect()
def setUp(self):
"""Executed before each test."""
# tests stuff
self.discriminator = "{}_{}".format(
hostname, strftime("%Y-%m-%d_%H%M%S", gmtime())
)
def tearDown(self):
"""Executed after each test."""
sleep(0.5)
@classmethod
def tearDownClass(cls):
"""Executed after the last test."""
# close sessions
cls.isogeo.close()
# -- TESTS ---------------------------------------------------------
# -- GET --
def test_search_search_as_application(self):
"""GET :resources/search"""
basic_search = self.isogeo.search()
# check attributes
self.assertTrue(hasattr(basic_search, "envelope"))
self.assertTrue(hasattr(basic_search, "limit"))
self.assertTrue(hasattr(basic_search, "offset"))
self.assertTrue(hasattr(basic_search, "query"))
self.assertTrue(hasattr(basic_search, "results"))
self.assertTrue(hasattr(basic_search, "tags"))
self.assertTrue(hasattr(basic_search, "total"))
# additional checks
print(
"Authenticated application has access to {} results".format(
basic_search.total
)
)
self.assertIsInstance(basic_search.total, int)
self.assertEqual(len(basic_search.results), 20)
# filter on a list of metadata uuids
def test_search_specific_mds_ok(self):
"""Searches filtering on specific metadata."""
# get random metadata within a small search
search_10 = self.isogeo.search(page_size=10, whole_results=0)
md_a, md_b = sample(search_10.results, 2)
md_bad = "trust_me_this_is_a_good_uuid"
# get random metadata within a small search
search_ids_1 = self.isogeo.search(specific_md=(md_a.get("_id"),))
search_ids_2 = self.isogeo.search(
specific_md=(md_a.get("_id"), md_b.get("_id"))
)
search_ids_3 = self.isogeo.search(
specific_md=(md_a.get("_id"), md_b.get("_id"), md_bad)
)
# test length
self.assertEqual(len(search_ids_1.results), 1)
self.assertEqual(len(search_ids_2.results), 2)
self.assertEqual(len(search_ids_3.results), 2)
def test_search_specific_mds_bad(self):
"""Searches filtering on specific metadata."""
# get random metadata within a small search
search = self.isogeo.search(whole_results=0)
metadata_id = sample(search.results, 1)[0]
# pass metadata UUID
with self.assertRaises(TypeError):
self.isogeo.search(
page_size=0, whole_results=0, specific_md=metadata_id.get("_id")
)
# includes
def test_search_includes_ok(self):
"""Searches including includes."""
self.isogeo.search(page_size=0, whole_results=0, include=("links", "contacts"))
def test_search_includes_all_ok(self):
"""Searches including includes."""
self.isogeo.search(page_size=0, whole_results=0, include="all")
def test_search_includes_empty(self):
"""Search with empty includes list."""
self.isogeo.search(page_size=0, whole_results=0, include=())
def test_search_includes_bad(self):
"""Include sub_resrouces require a list."""
with self.assertRaises(TypeError):
self.isogeo.search(page_size=0, whole_results=0, include="links")
# query
def test_search_parameter_query_ok(self):
"""Search with good query parameters."""
# contacts
self.isogeo.search(
query="contact:group:643f1035377b4ca59da6f31a39704c34",
page_size=0,
whole_results=0,
)
self.isogeo.search(
query="contact:08b3054757544463abd06f3ab51ee491:fe3e8ef97b8446be92d3c315ccbc70f9",
page_size=0,
whole_results=0,
)
# catalog
self.isogeo.search(
query="catalog:633216a375ab48ca8ca72e4a1af7a266",
page_size=0,
whole_results=0,
)
# CSW data-source
self.isogeo.search(
query="data-source:ace35ec171da4d0aa2f10e7308dcbdc5",
page_size=0,
whole_results=0,
)
# format
self.isogeo.search(query="format:shp", page_size=0, whole_results=0)
# has-no
self.isogeo.search(query="has-no:keyword", page_size=0, whole_results=0)
# inspire themes
self.isogeo.search(
query="keyword:inspire-theme:administrativeunits",
page_size=0,
whole_results=0,
)
# keyword
self.isogeo.search(query="keyword:isogeo:2018", page_size=0, whole_results=0)
# licenses
self.isogeo.search(
query="license:isogeo:63f121e14eda4f47b748595e0bcccc31",
page_size=0,
whole_results=0,
)
self.isogeo.search(
query="license:32f7e95ec4e94ca3bc1afda960003882:76c02a0baf594c77a569b3a1325aee30",
page_size=0,
whole_results=0,
)
# SRS
self.isogeo.search(query="coordinate-system:2154", page_size=0, whole_results=0)
# types
self.isogeo.search(query="type:dataset", page_size=0, whole_results=0)
self.isogeo.search(query="type:vector-dataset", page_size=0, whole_results=0)
self.isogeo.search(query="type:raster-dataset", page_size=0, whole_results=0)
self.isogeo.search(query="type:service", page_size=0, whole_results=0)
self.isogeo.search(query="type:resource", page_size=0, whole_results=0)
# workgroup - owner
self.isogeo.search(
query="owner:32f7e95ec4e94ca3bc1afda960003882", page_size=0, whole_results=0
)
# unknown
self.isogeo.search(query="unknown:filter", page_size=0, whole_results=0)
def test_search_bad_parameter_query(self):
"""Search with bad parameter."""
with self.assertRaises(ValueError):
self.isogeo.search(query="type:youpi")
with self.assertRaises(ValueError):
self.isogeo.search(query="action:yipiyo")
with self.assertRaises(ValueError):
self.isogeo.search(query="provider:youplaboum")
def test_search_bad_parameter_geographic(self):
"""Search with bad parameter."""
# geometric operator
with self.assertRaises(ValueError):
# georel should'nt be used without box or geo
self.isogeo.search(georel="intersects")
with self.assertRaises(ValueError):
# georel bad value
self.isogeo.search(bbox="-4.970,30.69418,8.258,51.237", georel="cross")
def test_parameter_not_unique_search(self):
"""SDK raises error for search with a parameter that must be unique."""
with self.assertRaises(ValueError):
self.isogeo.search(
query="coordinate-system:32517 coordinate-system:4326"
)
with self.assertRaises(ValueError):
self.isogeo.search(query="format:shp format:dwg")
with self.assertRaises(ValueError):
self.isogeo.search(
query="owner:32f7e95ec4e94ca3bc1afda960003882 owner:08b3054757544463abd06f3ab51ee491"
)
with self.assertRaises(ValueError):
self.isogeo.search(query="type:vector-dataset type:raster-dataset")
# disabling check, it should not raise anything
self.isogeo.search(
query="coordinate-system:32517 coordinate-system:4326", check=0
)
self.isogeo.search(query="format:shp format:dwg", check=0)
self.isogeo.search(
query="owner:32f7e95ec4e94ca3bc1afda960003882 owner:08b3054757544463abd06f3ab51ee491",
check=0,
)
self.isogeo.search(query="type:vector-dataset type:raster-dataset", check=0)
# search utilities
def test_search_augmented(self):
"""Augmented search with shares UUID"""
# at start, shares_id attribute doesn't exist
self.assertFalse(hasattr(self.isogeo, "shares_id"))
# normal
search = self.isogeo.search(page_size=0, whole_results=0, augment=0)
tags_shares = [i for i in search.tags if i.startswith("share:")]
# shares_id attribute still doesn't exist
self.assertEqual(len(tags_shares), 0)
self.assertFalse(hasattr(self.isogeo, "shares_id"))
# augment it
search = self.isogeo.search(page_size=0, whole_results=0, augment=1)
# compare
tags_shares = [i for i in search.tags if i.startswith("share:")]
self.assertNotEqual(len(tags_shares), 0)
self.assertTrue(hasattr(self.isogeo, "shares_id")) # now it exists
# redo using existing attribute
search = self.isogeo.search(page_size=0, whole_results=0, augment=1)
| gpl-3.0 | -1,019,222,056,491,242,000 | 36.314642 | 105 | 0.574386 | false |
ibab/missing_hep | missing_hep/fourmomentum.py | 1 | 2278 | """
Implementation of a vectorized four-momentum vector class
Adapted from
http://lapth.cnrs.fr/pg-nomin/wymant/FourMomentumClass.py
originally implemented by Chris Waymant
"""
import numpy as np
class FourMomentum:
def __init__(self, E, px, py, pz):
self.E = np.array(E)
self.px = np.array(px)
self.py = np.array(py)
self.pz = np.array(pz)
def __add__(self, other):
return FourMomentum(
self.E + other.E,
self.px + other.px,
self.py + other.py,
self.pz + other.pz
)
def __sub__(self, other):
return FourMomentum(
self.E - other.E,
self.px - other.px,
self.py - other.py,
self.pz - other.pz
)
def __mul__(self, other):
if isinstance(other, FourMomentum):
return self.E * other.E - self.px * other.px - self.py * other.py - self.pz * other.pz
else:
# Try to scale the four vector
return FourMomentum(
self.E * other,
self.px * other,
self.py * other,
self.pz * other
)
def __rmul__(self, other):
return self.__mul__(other)
def mass(self):
return np.sqrt(self * self)
def dot3D(self,other):
return (self.px * other.px + self.py * other.py + self.pz * other.pz)
def angle(self,other):
costheta = self.dot3D(other) / (self.dot3D(self) * other.dot3D(other))
return np.arccos(costheta)
def phi(self):
phitemp = np.arctan2(self.py,self.px)
phitemp[phitemp < 0] += 2 * np.pi
return phitemp
def pT(self):
return (self.px**2 + self.py**2)**0.5
def p(self):
return np.sqrt(self.dot3D(self))
def theta(self):
return np.arctan2(self.pT(), self.pz)
def eta(self):
thetatemp = self.theta()
return -np.log(np.tan(thetatemp/2.0))
    def deltaPhi(self, other):
        # Wrap the absolute azimuthal difference back into [0, pi]
        delta_phi = np.abs(self.phi() - other.phi())
        delta_phi[delta_phi > np.pi] = 2 * np.pi - delta_phi[delta_phi > np.pi]
        return delta_phi
def deltaR(self, other):
delta_phi = self.deltaPhi(other)
delta_eta = np.abs(self.eta()-other.eta())
return np.sqrt(delta_phi**2 + delta_eta**2)
def eT(self):
return np.sqrt((self.px**2 + self.py**2) + self * self)
| mit | -8,135,912,276,558,034,000 | 24.311111 | 94 | 0.566286 | false |
Danielweber7624/pybuilder | src/main/python/pybuilder/plugins/python/flake8_plugin.py | 1 | 3425 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plugin for Tarek Ziade's flake8 script.
Flake8 is a wrapper around: PyFlakes, pep8, Ned's McCabe script.
https://bitbucket.org/tarek/flake8
"""
from pybuilder.core import after, task, init, use_plugin, depends
from pybuilder.errors import BuildFailedException
from pybuilder.utils import assert_can_execute
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
__author__ = 'Michael Gruber'
use_plugin("python.core")
@init
def initialize_flake8_plugin(project):
project.build_depends_on("flake8")
project.set_property("flake8_break_build", False)
project.set_property("flake8_max_line_length", 120)
project.set_property("flake8_exclude_patterns", None)
project.set_property("flake8_include_test_sources", False)
project.set_property("flake8_include_scripts", False)
@after("prepare")
def assert_flake8_is_executable(logger):
""" Asserts that the flake8 script is executable. """
logger.debug("Checking if flake8 is executable.")
assert_can_execute(command_and_arguments=["flake8", "--version"],
prerequisite="flake8",
caller="plugin python.flake8")
@task
@depends("prepare")
def analyze(project, logger):
""" Applies the flake8 script to the sources of the given project. """
logger.info("Executing flake8 on project sources.")
verbose = project.get_property("verbose")
project.set_property_if_unset("flake8_verbose_output", verbose)
command = ExternalCommandBuilder('flake8', project)
command.use_argument('--ignore={0}').formatted_with_truthy_property('flake8_ignore')
command.use_argument('--max-line-length={0}').formatted_with_property('flake8_max_line_length')
command.use_argument('--exclude={0}').formatted_with_truthy_property('flake8_exclude_patterns')
include_test_sources = project.get_property("flake8_include_test_sources")
include_scripts = project.get_property("flake8_include_scripts")
result = command.run_on_production_source_files(logger,
include_test_sources=include_test_sources,
include_scripts=include_scripts)
count_of_warnings = len(result.report_lines)
count_of_errors = len(result.error_report_lines)
if count_of_errors > 0:
logger.error('Errors while running flake8, see {0}'.format(result.error_report_file))
if count_of_warnings > 0:
if project.get_property("flake8_break_build"):
error_message = "flake8 found {0} warning(s)".format(count_of_warnings)
raise BuildFailedException(error_message)
else:
logger.warn("flake8 found %d warning(s).", count_of_warnings)
| apache-2.0 | -2,326,735,212,889,654,000 | 37.483146 | 99 | 0.688467 | false |
itamaro/esxi-tools | esxitools/backup.py | 1 | 14135 | import os
import datetime
from glob import glob
import re
from tendo import singleton
import paramiko
from scp import SCPClient
from ftplib import FTP
from string import Template
from tempfile import mkstemp
import logging
import io
import utils
log_stream = io.StringIO()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(log_stream)
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter(u'%(asctime)s\t%(levelname)s\t%(message)s'))
logger.addHandler(sh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
try:
import settings
except ImportError:
logger.error(u'No settings.py file found!')
import sys
sys.exit(1)
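# --- Illustrative configuration sketch (not part of the original module) ---
# Hedged example of the structure that settings.ESXI_BACKUP_PROFILES is
# expected to have, inferred from the attribute accesses in BackupProfile
# below; every value shown is a made-up placeholder, not a documented default.
#
#   ESXI_BACKUP_PROFILES = {
#       u'nightly': {
#           u'host_ip': u'192.0.2.10', u'ssh_port': 22,
#           u'ssh_user': u'root', u'ssh_password': u'secret',
#           u'ftp_user': u'root', u'ftp_password': u'secret',
#           u'remote_workdir': u'/vmfs/volumes/datastore1/tmp',
#           u'remote_backup_dir': u'/vmfs/volumes/datastore1/backups',
#           u'backups_archive_dir': u'/srv/esxi-backups',
#           u'ghettovcb_script_template': u'ghettovcb.sh.tmpl',
#           u'backup_times': [(datetime.time(1, 0), datetime.time(6, 0))],
#           u'default_vm_config': {u'period': datetime.timedelta(days=7),
#                                  u'rotation_count': 2},
#           # per-VM dicts may override 'period' / 'rotation_count'
#           u'backup_vms': {u'some-vm': {}},
#           u'email_report': False,
#           # gmail_user, gmail_pwd, from_field and recipients are only
#           # read when email_report is true.
#       },
#   }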
def is_time_in_window(t, ranges):
for ts, te in ranges:
if ts <= t <= te:
return True
return False
def get_current_time():
import time
now = time.localtime()
return datetime.time(now.tm_hour, now.tm_min, now.tm_sec)
class BackupProfile(object):
_no_such_file_or_dir_re = re.compile(u'No such file or directory')
_backup_archive_re = re.compile(u'(?P<vmname>.+)\-'
'(?P<ts>\d{4}\-\d{2}\-\d{2}\_\d{2}\-\d{2}\-\d{2})\.tar\.gz')
_t = None
_chan = None
@classmethod
def _get_current_time(cls):
return datetime.datetime.now()
@classmethod
def _apply_template(cls, tmpl_file_path, tmpl_params, out_file_path=None):
"""
Applies template-parameters to template-file.
Creates an output file with applied template.
If `out_file_path` not specified, a temp file will be used.
"""
# Read the content of the file as a template string
with open(tmpl_file_path, 'r') as tmpl_file:
tmpl_str = Template(tmpl_file.read())
# Apply the template and save to the output file
out_string = tmpl_str.safe_substitute(tmpl_params)
if not out_file_path:
f, out_file_path = mkstemp(text=True)
os.close(f)
with io.open(out_file_path, 'w', newline='\n') as f:
f.write(out_string)
return out_file_path
def __init__(self, profile_dict):
self.__dict__.update(profile_dict)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._close_ssh_transport()
def _get_ssh_transport(self):
if self._t:
return self._t
self._t = paramiko.Transport((self.host_ip, self.ssh_port))
self._t.start_client()
self._t.auth_password(self.ssh_user, self.ssh_password)
return self._t
def _close_ssh_transport(self):
self._close_ssh_session()
if self._t:
self._t.close()
self._t = None
def _get_ssh_session(self):
# if self._chan and not self._chan.closed:
# print 'pre', self._chan
# return self._chan
self._chan = self._get_ssh_transport().open_session()
self._chan.set_combine_stderr(True)
return self._chan
def _close_ssh_session(self):
if self._chan:
self._chan.close()
self._chan = None
def _run_ssh_command(self, cmd):
# Open an SSH session and execute the command
chan = self._get_ssh_session()
chan.exec_command('%s ; echo exit_code=$?' % (cmd))
stdout = ''
x = chan.recv(1024)
while x:
stdout += x
x = chan.recv(1024)
output = stdout.strip().split('\n')
exit_code = re.match('exit_code\=(\-?\d+)', output[-1]).group(1)
if not '0' == exit_code:
logger.debug(u'SSH command "%s" failed with output:\n%s' %
(cmd, '\n'.join(output)))
raise RuntimeWarning(u'Remote command failed with code %s' %
(exit_code))
return '\n'.join(output[:-1])
def _get_vm_config(self, vmname, config):
vm_dict = self.backup_vms[vmname]
if config in vm_dict:
return vm_dict[config]
return self.default_vm_config[config]
def _list_backup_archives(self):
glob_str = os.path.join(self.backups_archive_dir, u'*.tar.gz')
return glob(glob_str)
def _list_backup_archives_for_vm(self, vmname):
glob_str = os.path.join(self.backups_archive_dir,
u'%s-*.tar.gz' % (vmname))
return glob(glob_str)
def get_latest_archives(self):
"""
Returns dictionary of existing archives in `backup_archive_dir`,
with VM names as keys and the latest available backup timestamp
as value.
"""
res = dict()
for archive_path in self._list_backup_archives():
_, archive = os.path.split(archive_path)
            m = self._backup_archive_re.match(archive)
if m:
vmname = m.groupdict()[u'vmname']
ts = datetime.datetime.strptime(m.groupdict()[u'ts'],
'%Y-%m-%d_%H-%M-%S')
if vmname in res:
if ts > res[vmname]:
res[vmname] = ts
else:
res[vmname] = ts
return res
def is_vm_backup_overdue(self, vmname, ts):
"Returns True if `vmname` backup from `ts` is older than period"
time_since_last_backup = self._get_current_time() - ts
if not vmname in self.backup_vms:
logger.warning(u'VM "%s" not in profile, but archive found' %
(vmname))
return False
period = self._get_vm_config(vmname, u'period')
assert type(period) == datetime.timedelta
return time_since_last_backup >= period
def get_next_vm_to_backup(self):
"""
"""
# First priority - VMs with no existing archives
for vmname in self.backup_vms.keys():
if not self._list_backup_archives_for_vm(vmname):
logger.debug(u'VM "%s" is ready next (no existing archives)' %
vmname)
return vmname
# Second priority - the VM with the oldest archive that is overdue
ret_vm = None
ret_vm_last_backup = None
for vmname, ts in self.get_latest_archives().iteritems():
if self.is_vm_backup_overdue(vmname, ts):
logger.debug(u'VM "%s" backup is overdue' % (vmname))
if ret_vm_last_backup:
if ts < ret_vm_last_backup:
ret_vm = vmname
ret_vm_last_backup = ts
else:
ret_vm = vmname
ret_vm_last_backup = ts
return ret_vm
def _upload_file(self, local_source, remote_destination):
scp = SCPClient(self._get_ssh_transport())
scp.put(local_source, remote_destination)
def _set_remote_chmod(self, remote_file):
return self._run_ssh_command(u'chmod +x %s' % (remote_file))
def _remove_remote_file(self, remote_file):
self._run_ssh_command('rm %s' % (remote_file))
def _remove_local_file(self, file):
os.remove(file)
def _parse_ghettovcb_output(self, raw_output):
ret_dict = {u'WARNINGS': list()}
info_prefix = u'\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2} \-\- info\:'
config_matcher = re.compile(
u'%s CONFIG \- (?P<key>\w+) \= (?P<val>.+)' % (info_prefix))
warn_matcher = re.compile(u'%s WARN\: (?P<msg>.+)' % (info_prefix))
duration_matcher = re.compile(
u'%s Backup Duration\: (?P<time>.+)' % (info_prefix))
final_status_matcher = re.compile(
u'%s \#{6} Final status\: (?P<status>.+) \#{6}' % (info_prefix))
for raw_line in raw_output.split(u'\n'):
config = config_matcher.match(raw_line)
if config:
ret_dict[config.groupdict()[u'key']] = \
config.groupdict()[u'val']
continue
warning = warn_matcher.match(raw_line)
if warning:
ret_dict[u'WARNINGS'].append(warning.groupdict()[u'msg'])
continue
duration = duration_matcher.match(raw_line)
if duration:
ret_dict[u'BACKUP_DURATION'] = duration.groupdict()[u'time']
continue
final_status = final_status_matcher.match(raw_line)
if final_status:
status = final_status.groupdict()[u'status']
ret_dict[u'FINAL_STATUS'] = u'All VMs backed up OK!' == status
continue
return ret_dict
def _run_remote_backup(self, vmname):
"Run ghettovcb script to backup the specified VM"
# Generate ghettovcb script from template
local_script = self._apply_template(
self.ghettovcb_script_template,
{u'RemoteBackupDir': self.remote_backup_dir}
)
# Upload ghettovcb script to host and make it executable
remote_script = '/'.join((self.remote_workdir, 'ghettovcb.sh'))
self._upload_file(local_script, remote_script)
self._set_remote_chmod(remote_script)
# cleanup local temp
self._remove_local_file(local_script)
# Run ghettovcb script for the requested vm-name
backup_cmd = '%s -m %s' % (remote_script, vmname)
cmd_result = self._run_ssh_command(backup_cmd)
self._remove_remote_file(remote_script)
# Parse the output and return the result
return self._parse_ghettovcb_output(cmd_result)
def _archive_remote_backup(self, vmname, backup_dir):
"Tar's and GZip's the backup dir, returning full path of the archive"
remote_workdir = u'/'.join((self.remote_backup_dir, vmname))
remote_archive = u'%s.tar.gz' % (backup_dir)
tar_cmd = u'cd "%s"; tar -cz -f "%s" "%s"' % \
(remote_workdir, remote_archive, backup_dir)
tar_output = self._run_ssh_command(tar_cmd)
if self._no_such_file_or_dir_re.search(tar_output):
raise RuntimeError(u'Tar command failed:\n%s' % (tar_output))
return '/'.join((remote_workdir, remote_archive))
def _download_archive(self, remote_path):
"""
Downloads a remote file at `remote_path` via FTP to
`self.backups_archive_dir` using same file name,
returning the total time it took (in seconds).
"""
from time import time
ts = time()
_, remote_filename = os.path.split(remote_path)
dest_path = os.path.join(self.backups_archive_dir, remote_filename)
ftp = FTP(self.host_ip)
ftp.login(self.ftp_user, self.ftp_password)
with open(dest_path, 'wb') as dest_file:
ftp.retrbinary(u'RETR %s' % (remote_path), dest_file.write)
return time() - ts
def backup_vm(self, vmname):
ghettovcb_output = self._run_remote_backup(vmname)
logger.info(u'ghettovcb output:\n%s' % (
u'\n'.join(
[u'\t%s: %s' % (k,v)
for k,v in ghettovcb_output.iteritems()])))
if not ghettovcb_output[u'FINAL_STATUS']:
# Something failed
return False
backup_name = ghettovcb_output[u'VM_BACKUP_DIR_NAMING_CONVENTION']
backup_dir = u'%s-%s' % (vmname, backup_name)
remote_archive = self._archive_remote_backup(vmname, backup_dir)
download_time = self._download_archive(remote_archive)
logger.info(u'Backup archive "%s" downloaded to "%s" in %f seconds.' %
(remote_archive, self.backups_archive_dir, download_time))
self._remove_remote_file(remote_archive)
logger.info(u'Cleaned up archive from remote host')
def trim_backup_archives(self):
for vmname in self.backup_vms.keys():
vm_archives = self._list_backup_archives_for_vm(vmname)
rot_count = self._get_vm_config(vmname, u'rotation_count')
for archive_to_delete in sorted(vm_archives)[:-rot_count]:
logger.info(u'Deleting archive "%s"' %
(archive_to_delete))
self._remove_local_file(archive_to_delete)
def backup(**kwargs):
# Avoid multiple instances of backup program
me = singleton.SingleInstance(flavor_id=u'esxi-backup')
# Obtain profile configuration
if not u'profile_name' in kwargs:
raise RuntimeError(u'Missing profile_name argument')
profile_name = kwargs[u'profile_name']
if not profile_name in settings.ESXI_BACKUP_PROFILES:
raise RuntimeError(u'No such profile "%s"' % profile_name)
profile = settings.ESXI_BACKUP_PROFILES[profile_name]
logger.info(u'Running backup profile "%s"' % (profile_name))
# Check if profile is currently active
t = get_current_time()
if not is_time_in_window(t, profile['backup_times']):
logger.debug(u'Out of time range. Skipping backup run for profile.')
return True
with BackupProfile(profile) as bp:
next_vm = bp.get_next_vm_to_backup()
if next_vm:
logger.info(u'Running backup for VM "%s"' % (next_vm))
bp.backup_vm(next_vm)
bp.trim_backup_archives()
if bp.email_report:
utils.send_email(
bp.gmail_user, bp.gmail_pwd, bp.from_field, bp.recipients,
u'BACKUP OK %s' % (next_vm), log_stream.getvalue())
else:
logger.info(u'No next VM to backup - Nothing to do.')
return True
| mit | 4,045,872,865,581,679,000 | 38.617816 | 78 | 0.55168 | false |
mattseymour/django | tests/staticfiles_tests/test_liveserver.py | 2 | 2974 | """
A subset of the tests in tests/servers/tests exercising
django.contrib.staticfiles.testing.StaticLiveServerTestCase instead of
django.test.LiveServerTestCase.
"""
import os
from urllib.request import urlopen
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
TEST_ROOT = os.path.dirname(__file__)
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'STATIC_URL': '/static/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
}
class LiveServerBase(StaticLiveServerTestCase):
available_apps = []
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(LiveServerBase, cls).tearDownClass()
# Restore original settings
cls.settings_override.disable()
class StaticLiveServerChecks(LiveServerBase):
@classmethod
def setUpClass(cls):
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception()
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls):
try:
super(StaticLiveServerChecks, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except ImproperlyConfigured:
# This raises ImproperlyConfigured("You're using the staticfiles
# app without having set the required STATIC_URL setting.")
pass
finally:
super(StaticLiveServerChecks, cls).tearDownClass()
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
# test runner and the overridden setUpClass() method is executed.
pass
class StaticLiveServerView(LiveServerBase):
def urlopen(self, url):
return urlopen(self.live_server_url + url)
# The test is going to access a static file stored in this application.
@modify_settings(INSTALLED_APPS={'append': 'staticfiles_tests.apps.test'})
def test_collectstatic_emulation(self):
"""
StaticLiveServerTestCase use of staticfiles' serve() allows it
to discover app's static assets without having to collectstatic first.
"""
with self.urlopen('/static/test/file.txt') as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'In static directory.')
| bsd-3-clause | -5,763,787,898,378,140,000 | 32.795455 | 79 | 0.679892 | false |
rsethur/DMNPlusTF | Attention.py | 1 | 3210 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from dnn import matmul_3d_2d
from dnn import l2_reg
class DMNAttentionGate:
"""
Possible enhancements
1. make scope as an input param
2. get shape from inputs itself
"""
def __init__(self, hidden_size, num_attention_features, dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()):
with tf.variable_scope("AttentionGate"):
self.b_1 = tf.get_variable("bias_1", (hidden_size,), dtype=dtype, initializer=tf.constant_initializer(0.0))
self.W_1 = tf.get_variable("W_1", (hidden_size*num_attention_features, hidden_size), dtype=dtype, initializer=initializer, regularizer=l2_reg)
self.W_2 = tf.get_variable("W_2", (hidden_size, 1), dtype=dtype, initializer=initializer, regularizer=l2_reg)
self.b_2 = tf.get_variable("bias_2", 1, dtype=dtype, initializer=tf.constant_initializer(0.0))
def get_attention(self, questions, prev_memory, facts, sentences_per_input_in_batch):
#questions_gru_final_state dim: batch_size, hidden_size
#facts_from_fusion_layer dim: [max_sentences_per_input, [batch_size, hidden_size]]
with tf.variable_scope("AttentionGate"):
# features dim: list of length 4, each [max_sentences_per_input, batch_size, hidden_size]
features = [facts * questions, facts * prev_memory, tf.abs(facts - questions),
tf.abs(facts - prev_memory)]
#dim: [max_sentences_per_input, batch_size, 4*hidden_size]
feature_vectors = tf.concat(2, features)
#term 1( with W1) = list of max_sentences_per_input: [batch_size, hidden_size]
#whole expression i.e. with term 2( with W2) = list of max_sentences_per_input: [batch_size,1]
"""
#code using einsum (without looping)
#Not working due to the bug: https://github.com/tensorflow/tensorflow/issues/6384
#Fixed in version 1.0 onwards!
inner_term = tf.tanh(matmul_3d_2d(feature_vectors, self.W_1, self.b_1))
attentions = matmul_3d_2d(inner_term, self.W_2, self.b_2)
"""
#code using looping
max_sentences_per_input = feature_vectors.get_shape().as_list()[0]
attentions = []
for i in range(max_sentences_per_input):
attentions.append(tf.matmul(tf.tanh(tf.matmul(feature_vectors[i], self.W_1) + self.b_1), self.W_2) + self.b_2)
# attentions out: max_sentences_per_input, batch_size
attentions = tf.squeeze(attentions, axis=2)
#remove attn
attn_mask = tf.transpose(tf.sequence_mask(sentences_per_input_in_batch, dtype=tf.float32, maxlen=max_sentences_per_input))
attentions = attentions * attn_mask
"""
softmax_att: max_sentences_per_input, batch_size
dim=0 is needed so we don't need to transpose the attention matrix before applying softmax
"""
softmax_attn = tf.nn.softmax(attentions, dim=0)
return softmax_attn
| mit | 2,930,214,116,487,884,000 | 46.205882 | 154 | 0.632399 | false |
Donkyhotay/MoonPy | zope/server/logger/socketlogger.py | 1 | 1579 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Socket Logger
Sends logging messages to a socket.
$Id: socketlogger.py 26567 2004-07-16 06:58:27Z srichter $
"""
import asynchat
import socket
from zope.server.interfaces.logger import IMessageLogger
from zope.interface import implements
class SocketLogger(asynchat.async_chat):
"""Log to a stream socket, asynchronously."""
implements(IMessageLogger)
def __init__(self, address):
if type(address) == type(''):
self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect(address)
self.address = address
def __repr__(self):
return '<socket logger: address=%s>' % (self.address)
def logMessage(self, message):
'See IMessageLogger'
        # push() is inherited from asynchat.async_chat; the raw socket
        # object itself has no push() method.
        if message[-2:] != '\r\n':
            self.push(message + '\r\n')
        else:
            self.push(message)
| gpl-3.0 | -9,021,838,366,546,479,000 | 31.895833 | 78 | 0.613046 | false |
RaviVengatesh/Guvi_codes | Translation.py | 1 | 1090 | def TotalTranslation(input_seq):
Total_Seq=0;
input_seq_len=len(input_seq);
Seq_Itr=0;
Seq_Stack_Init=[];
Seq_Stack_Len=[];
for itr in range(0,input_seq_len):
Seq_Stack_Init.append(0);
Seq_Stack_Len.append(1);
input_seq_len-=1;
Seq_Itr+=1;
while(1):
while(Seq_Itr>0):
Next_Start_Pos=Seq_Stack_Init[Seq_Itr-1]+Seq_Stack_Len[Seq_Itr-1];
Next_Seq_Len=Next_Start_Pos+1;
if(Next_Seq_Len>input_seq_len):
break;
Seq_Stack_Init[Seq_Itr]=Next_Start_Pos;
Seq_Stack_Len[Seq_Itr]=1;
Seq_Itr+=1;
if(Next_Seq_Len==input_seq_len):
Total_Seq+=1;
break;
while(Seq_Itr>0):
Prev_Start_Pos_Start=Seq_Stack_Init[Seq_Itr-1];
if(Seq_Stack_Len[Seq_Itr-1]==1 and Prev_Start_Pos_Start+2<=input_seq_len+1):
num=input_seq[Prev_Start_Pos_Start:Prev_Start_Pos_Start+2];
num=int(num);
if(num<=26):
Seq_Stack_Len[Seq_Itr-1]=2;
if(Prev_Start_Pos_Start+2>=input_seq_len):
Total_Seq+=1;
break;
Seq_Itr-=1;
if(Seq_Itr<=0):
break;
return Total_Seq;
input_seq=input();
print(TotalTranslation(input_seq));
| gpl-3.0 | 7,337,645,847,466,676,000 | 26.948718 | 79 | 0.627523 | false |
xcrespo/Flute-Fingering | fingering.py | 1 | 1301 | #Converts a txt file containing a list of notes into an image
#with the corresponding fingering images
#Inputs: txt file name
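#Example usage (hedged, not from the original project): the notes file is a
#whitespace-separated list of the note names defined in the "dec" dictionary
#below, e.g. a file song.txt containing
#  c d e f g a b c2 b0
#rendered with:  python fingering.py song.txt  ->  writes song.jpg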
from PIL import Image
import math
import sys
#Definitions
iHeigth = 300
iWidth = 150
iGridWidth = 8.0 #items
i = 0 #counts the current note index
#Open fingering images
si0 = Image.open("Notes/si0.jpg")
do = Image.open("Notes/do.jpg")
re = Image.open("Notes/re.jpg")
mi = Image.open("Notes/mi.jpg")
fa = Image.open("Notes/fa.jpg")
sol = Image.open("Notes/sol.jpg")
la = Image.open("Notes/la.jpg")
si = Image.open("Notes/si.jpg")
do2 = Image.open("Notes/do2.jpg")
#Create dictionaries
dec = {'b0':si0, 'c':do, 'd':re, 'e':mi, 'f':fa, 'g':sol, 'a':la, 'b':si, 'c2':do2}
#Open & Read original file
f = open(sys.argv[1], 'r')
fname = f.name.split('.')
fname = fname[0]
notes = f.read().split()
#Generate output file
iOutHeigth = int(math.ceil(len(notes) / iGridWidth)) * iHeigth
iOutWidth = int(iWidth * iGridWidth)
out = Image.new("RGB",(iOutWidth ,iOutHeigth),0xFFFFFF) #init white background
#Go over the notes list and paste the corresponding images
#inside the corresponding place of the output file
for note in notes:
iPosX = int(i % iGridWidth) * iWidth
iPosY = int(i // iGridWidth) * iHeigth
i+=1
out.paste(dec[note],(iPosX,iPosY))
out.save(fname+".jpg","JPEG") | gpl-2.0 | 8,647,078,093,658,456,000 | 26.125 | 83 | 0.69485 | false |
iw3hxn/LibrERP | c2c_sequence_fy/models/__init__.py | 1 | 1297 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 Camptocamp (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_fiscalyear
from . import ir_sequence_type
from . import ir_sequence
from . import account
from . import account_move
from . import ir_sequence_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,373,742,626,826,669,600 | 45.321429 | 79 | 0.635312 | false |
drtuxwang/system-config | bin/offline.py | 1 | 3502 | #!/usr/bin/env python3
"""
Run a command without network access.
"""
import argparse
import glob
import os
import signal
import sys
from typing import List
import command_mod
import subtask_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_command(self) -> command_mod.Command:
"""
Return command Command class object.
"""
return self._command
def _parse_args(self, args: List[str]) -> List[str]:
parser = argparse.ArgumentParser(
description='Run a command without network access.',
)
parser.add_argument(
'command',
nargs=1,
help='Command to run.'
)
parser.add_argument(
'args',
nargs='*',
metavar='arg',
help='Command argument.'
)
my_args = []
for arg in args:
my_args.append(arg)
if not arg.startswith('-'):
break
self._args = parser.parse_args(my_args)
return args[len(my_args):]
@staticmethod
def _get_command(directory: str, command: str) -> command_mod.Command:
if os.path.isfile(command):
return command_mod.CommandFile(os.path.abspath(command))
file = os.path.join(directory, command)
if os.path.isfile(file):
return command_mod.CommandFile(file)
return command_mod.Command(command)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
command_args = self._parse_args(args[1:])
self._command = self._get_command(
os.path.dirname(args[0]),
self._args.command[0]
)
self._command.set_args(command_args)
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
@staticmethod
def _get_unshare() -> List[str]:
unshare = command_mod.Command('unshare', errors='ignore')
if unshare.is_found():
task = subtask_mod.Batch(unshare.get_cmdline() + ['--help'])
task.run(pattern='--map-root-user')
if task.has_output():
return unshare.get_cmdline() + ['--net', '--map-root-user']
return []
@classmethod
def run(cls) -> int:
"""
Start program
"""
options = Options()
cmdline = cls._get_unshare()
if cmdline:
print('Unsharing network namespace...')
task = subtask_mod.Exec(cmdline + options.get_command().get_cmdline())
task.run()
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| gpl-2.0 | 3,856,035,756,455,022,600 | 23.48951 | 78 | 0.519132 | false |
sainipray/djadmin | djadmin/middleware.py | 1 | 2550 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from django.contrib.admin.sites import AdminSite
from django.utils.functional import SimpleLazyObject
from djadmin import settings
from .models import DjadminModelSetting
from .signals import get_register_model_with_mixin, handle_djadmin_field_data, add_visitor
from .util import (get_user_agent, get_admin_color_theme,
get_admin_color_theme_hex_code, is_session_exist,
create_new_session, is_admin_url)
if django.VERSION >= (1, 10):
from django.utils.deprecation import MiddlewareMixin
else:
MiddlewareMixin = object
class DJMiddleware(MiddlewareMixin):
def process_request(self, request):
        # Attach the requesting user's user agent to the request
request.user_agent = SimpleLazyObject(lambda: get_user_agent(request))
# Check user session
if not is_session_exist(request):
# Add as a visitor
session = create_new_session(request)
add_visitor(request)
if is_admin_url(request):
admin_color_theme = get_admin_color_theme(settings.ADMIN_COLOR_THEME)
admin_color_theme_code = get_admin_color_theme_hex_code(admin_color_theme)
allow_forget_password_admin = settings.ALLOW_FORGET_PASSWORD_ADMIN
AdminSite.site_header = settings.ADMIN_HEADER_TITLE
request.ADMIN_COLOR_THEME = admin_color_theme
request.ALLOW_FORGET_PASSWORD_ADMIN = allow_forget_password_admin
request.ADMIN_COLOR_THEME_CODE = admin_color_theme_code
if request.user.is_superuser and settings.DJADMIN_DYNAMIC_FIELD_DISPLAY:
register_model_object_list = get_register_model_with_mixin()
exist_model_object_list = DjadminModelSetting.objects.all()
register_model_list = [model.__name__ for model in register_model_object_list]
exist_model_list = [str(model.model) for model in exist_model_object_list]
create_model_name = [model for model in register_model_list if model not in exist_model_list]
delete_model_name = [model for model in exist_model_list if model not in register_model_list]
if len(create_model_name):
handle_djadmin_field_data(register_model_object_list, True)
if len(delete_model_name):
if settings.DJADMIN_DYNAMIC_DELETE_UNREGISTER_FIELD:
handle_djadmin_field_data(register_model_object_list, False)
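# Activation sketch (an assumption about the install layout, not part of this
# file): with the package importable as "djadmin", the middleware would be
# enabled in Django settings roughly like:
#     MIDDLEWARE = [
#         ...,
#         'djadmin.middleware.DJMiddleware',
#     ]
# (older Django versions would use MIDDLEWARE_CLASSES instead)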
| mit | 7,572,932,252,606,137,000 | 51.040816 | 109 | 0.664706 | false |
pytorch/vision | torchvision/_internally_replaced_utils.py | 1 | 1606 | import os
import importlib.machinery
def _download_file_from_remote_location(fpath: str, url: str) -> None:
pass
def _is_remote_location_available() -> bool:
return False
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
def _get_extension_path(lib_name):
lib_dir = os.path.dirname(__file__)
if os.name == 'nt':
# Register the main torchvision library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise err
kernel32.SetErrorMode(prev_error_mode)
loader_details = (
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES
)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(lib_name)
if ext_specs is None:
raise ImportError
return ext_specs.origin
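# Minimal usage sketch (hedged): the extension name '_C' below is illustrative
# and assumes a compiled extension module is shipped alongside this package:
#     lib_path = _get_extension_path('_C')
#     torch.ops.load_library(lib_path)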
| bsd-3-clause | 6,062,785,199,781,493,000 | 28.2 | 84 | 0.648817 | false |
apache/bloodhound | bloodhound_dashboard/bhdashboard/tests/test_webui.py | 2 | 4831 | # -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
r"""Project dashboard for Apache(TM) Bloodhound
In this file you'll find part of the tests written to ensure that
the dashboard web module works as expected.
Only the tests requiring minimal setup effort are included below.
This means that the environment used to run these tests contains only
the bare minimum of information included in an environment (i.e. only
the data specified by `trac.db_default.get_data`).
Once the tests are started, all built-in components (except
trac.versioncontrol.*) as well as the widget system and extensions
are loaded. In addition, the following values are (auto-magically)
made available in the global namespace (i.e. provided that
the test name is written like `|widget_name: Descriptive message`):
- __tester__ An instance of `unittest.TestCase` representing the
test case for the statement under test. Useful
when specific assertions (e.g. `assertEquals`)
are needed.
  - req         A dummy request object set up for anonymous access.
  - auth_req    A dummy request object set up as if user `murphy` were
                accessing the site.
- env the Trac environment used as a stub for testing purposes.
This object is an instance of
`bhdashboard.tests.EnvironmentStub`.
- ticket_data A set of tickets used for testing purposes.
"""
#------------------------------------------------------
# Test artifacts
#------------------------------------------------------
import sys
from bhdashboard.tests import trac_version, trac_tags
def test_suite():
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, REPORT_UDIFF
from dutest import MultiTestLoader
from unittest import defaultTestLoader
from bhdashboard.tests import DocTestTracLoader, ticket_data
magic_vars = dict(ticket_data=ticket_data)
if trac_version < (0, 13): # FIXME: Should it be (0, 12) ?
kwargs = {'enable': ['trac.[a-uw-z]*', 'tracrpc.*', 'bhdashboard.*']}
else:
kwargs = {
'enable': ['trac.*', 'tracrpc.*', 'bhdashboard.*'],
'disable': ['trac.versioncontrol.*']
}
l = MultiTestLoader(
[defaultTestLoader,
DocTestTracLoader(extraglobs=magic_vars,
default_data=True,
optionflags=ELLIPSIS | REPORT_UDIFF |
NORMALIZE_WHITESPACE,
**kwargs)
])
return l.loadTestsFromModule(sys.modules[__name__])
#------------------------------------------------------
# Helper functions
#------------------------------------------------------
from datetime import datetime, time, date
from itertools import izip
from pprint import pprint
from bhdashboard.tests import clear_perm_cache
def prepare_ticket_workflow(tcktrpc, ticket_data, auth_req):
r"""Set ticket status considering the actions defined in standard
ticket workflow. Needed for TracRpc>=1.0.6
"""
from time import sleep
TICKET_ACTIONS = {'accepted': 'accept', 'closed': 'resolve',
'assigned': 'reassign'}
sleep(1)
for idx, (_, __, td) in enumerate(ticket_data):
action = TICKET_ACTIONS.get(td.get('status'))
if action is not None:
aux_attrs = {'action': action}
aux_attrs.update(td)
tcktrpc.update(auth_req, idx + 1, "", aux_attrs)
sleep(1)
for idx, (_, __, td) in enumerate(ticket_data):
tcktrpc.update(auth_req, idx + 1, "", td)
from bhdashboard.web_ui import DashboardModule
__test__ = {
'Initialization: Report widgets' : r"""
""",
'Rendering templates' : r"""
>>> dbm = DashboardModule(env)
>>> from trac.mimeview.api import Context
>>> context = Context.from_request(auth_req)
#FIXME: This won't work. Missing schema
>>> pprint(dbm.expand_widget_data(context))
[{'content': <genshi.core.Stream object at ...>,
'title': <Element "a">}]
""",
}
| apache-2.0 | -2,395,932,893,504,011,000 | 35.598485 | 77 | 0.626164 | false |
EmreAtes/spack | lib/spack/spack/modules/tcl.py | 1 | 4349 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""This module implements the classes necessary to generate TCL
non-hierarchical modules.
"""
import os.path
import string
import spack.tengine as tengine
import llnl.util.tty as tty
from .common import BaseConfiguration, BaseFileLayout
from .common import BaseContext, BaseModuleFileWriter, configuration
#: TCL specific part of the configuration
configuration = configuration.get('tcl', {})
#: Caches the configuration {spec_hash: configuration}
configuration_registry = {}
def make_configuration(spec):
"""Returns the tcl configuration for spec"""
key = spec.dag_hash()
try:
return configuration_registry[key]
except KeyError:
return configuration_registry.setdefault(key, TclConfiguration(spec))
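# Usage sketch with a hypothetical `spec` object: because results are cached
# by the spec's DAG hash, repeated calls return the same configuration object:
#     conf_a = make_configuration(spec)
#     conf_b = make_configuration(spec)
#     assert conf_a is conf_b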
def make_layout(spec):
"""Returns the layout information for spec """
conf = make_configuration(spec)
return TclFileLayout(conf)
def make_context(spec):
"""Returns the context information for spec"""
conf = make_configuration(spec)
return TclContext(conf)
class TclConfiguration(BaseConfiguration):
"""Configuration class for tcl module files."""
@property
def conflicts(self):
"""Conflicts for this module file"""
return self.conf.get('conflict', [])
class TclFileLayout(BaseFileLayout):
"""File layout for tcl module files."""
class TclContext(BaseContext):
"""Context class for tcl module files."""
@tengine.context_property
def prerequisites(self):
"""List of modules that needs to be loaded automatically."""
return self._create_module_list_of('specs_to_prereq')
@tengine.context_property
def conflicts(self):
"""List of conflicts for the tcl module file."""
fmts = []
naming_scheme = self.conf.naming_scheme
f = string.Formatter()
for item in self.conf.conflicts:
if len([x for x in f.parse(item)]) > 1:
for naming_dir, conflict_dir in zip(
naming_scheme.split('/'), item.split('/')
):
if naming_dir != conflict_dir:
message = 'conflict scheme does not match naming '
message += 'scheme [{spec}]\n\n'
message += 'naming scheme : "{nformat}"\n'
message += 'conflict scheme : "{cformat}"\n\n'
message += '** You may want to check your '
message += '`modules.yaml` configuration file **\n'
tty.error(message.format(spec=self.spec,
nformat=naming_scheme,
cformat=item))
raise SystemExit('Module generation aborted.')
item = self.spec.format(item)
fmts.append(item)
# Substitute spec tokens if present
return [self.spec.format(x) for x in fmts]
class TclModulefileWriter(BaseModuleFileWriter):
"""Writer class for tcl module files."""
default_template = os.path.join('modules', 'modulefile.tcl')
| lgpl-2.1 | 2,136,571,882,603,533,300 | 36.491379 | 78 | 0.625431 | false |
abelcarreras/DynaPhoPy | dynaphopy/interface/phonopy_link.py | 1 | 13344 | import numpy as np
from phonopy.api_phonopy import Phonopy
from phonopy.file_IO import parse_BORN, parse_FORCE_SETS, write_FORCE_CONSTANTS, parse_FORCE_CONSTANTS
from phonopy.harmonic.dynmat_to_fc import DynmatToForceConstants
from phonopy.harmonic.force_constants import set_tensor_symmetry_PJ
from phonopy.units import VaspToTHz
from phonopy.structure.symmetry import Symmetry
# support old phonopy versions
try:
from phonopy.structure.atoms import PhonopyAtoms
except ImportError:
from phonopy.structure.atoms import Atoms as PhonopyAtoms
class ForceConstants:
def __init__(self, force_constants, supercell=None):
self._force_constants = np.array(force_constants)
self._supercell = supercell
def get_array(self):
return self._force_constants
def get_supercell(self):
return self._supercell
def set_supercell(self, supercell):
self._supercell = supercell
class ForceSets:
def __init__(self, force_sets, supercell=None):
self._forces = force_sets
self._supercell = supercell
def get_dict(self):
return self._forces
def get_supercell(self):
return self._supercell
def set_supercell(self, supercell):
self._supercell = supercell
def eigenvectors_normalization(eigenvector):
for i in range(eigenvector.shape[0]):
eigenvector[i, :] = eigenvector[i, :]/np.linalg.norm(eigenvector[i, :])
return eigenvector
def get_force_sets_from_file(file_name='FORCE_SETS', fs_supercell=None):
# Just a wrapper to phonopy function
force_sets = ForceSets(parse_FORCE_SETS(filename=file_name))
if fs_supercell is not None:
force_sets.set_supercell(fs_supercell)
else:
print('No force sets supercell defined, set to identity')
force_sets.set_supercell(np.identity(3))
return force_sets
def get_force_constants_from_file(file_name='FORCE_CONSTANTS', fc_supercell=None):
# Just a wrapper to phonopy function
force_constants = ForceConstants(np.array(parse_FORCE_CONSTANTS(filename=file_name)))
if fc_supercell is not None:
force_constants.set_supercell(fc_supercell)
else:
        print('No force constants supercell defined, set to identity')
force_constants.set_supercell(np.identity(3))
return force_constants
def save_force_constants_to_file(force_constants, filename='FORCE_CONSTANTS'):
# Just a wrapper to phonopy function
write_FORCE_CONSTANTS(force_constants.get_array(), filename=filename)
def get_phonon(structure, NAC=False, setup_forces=True, custom_supercell=None, symprec=1e-5):
if custom_supercell is None:
super_cell_phonon = structure.get_supercell_phonon()
else:
super_cell_phonon = custom_supercell
# Preparing the bulk type object
bulk = PhonopyAtoms(symbols=structure.get_atomic_elements(),
scaled_positions=structure.get_scaled_positions(),
cell=structure.get_cell())
phonon = Phonopy(bulk, super_cell_phonon,
primitive_matrix=structure.get_primitive_matrix(),
symprec=symprec)
    # Non Analytical Corrections (NAC) from Phonopy [frequencies only; eigenvectors are not affected by this option]
if setup_forces:
if structure.get_force_constants() is not None:
phonon.set_force_constants(structure.get_force_constants().get_array())
elif structure.get_force_sets() is not None:
phonon.set_displacement_dataset(structure.get_force_sets().get_dict())
phonon.produce_force_constants()
structure.set_force_constants(ForceConstants(phonon.get_force_constants(),
supercell=structure.get_force_sets().get_supercell()))
else:
print('No force sets/constants available!')
exit()
if NAC:
print("Warning: Using Non Analytical Corrections")
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, is_symmetry=True)
phonon.set_nac_params(nac_params=nac_params)
return phonon
def obtain_eigenvectors_and_frequencies(structure, q_vector, test_orthonormal=False, print_data=True):
phonon = get_phonon(structure)
frequencies, eigenvectors = phonon.get_frequencies_with_eigenvectors(q_vector)
# Making sure eigenvectors are orthonormal (can be omitted)
if test_orthonormal:
eigenvectors = eigenvectors_normalization(eigenvectors)
print('Testing eigenvectors orthonormality')
np.set_printoptions(precision=3, suppress=True)
print(np.dot(eigenvectors.T, np.ma.conjugate(eigenvectors)).real)
np.set_printoptions(suppress=False)
    # Arrange eigenvectors by atoms and dimensions
number_of_dimensions = structure.get_number_of_dimensions()
number_of_primitive_atoms = structure.get_number_of_primitive_atoms()
    arranged_ev = np.array([[[eigenvectors[j*number_of_dimensions+k, i]
for k in range(number_of_dimensions)]
for j in range(number_of_primitive_atoms)]
for i in range(number_of_primitive_atoms*number_of_dimensions)])
if print_data:
print("Harmonic frequencies (THz):")
print(frequencies)
return arranged_ev, frequencies
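# Usage sketch (hedged): assumes `structure` already has force constants or
# force sets attached, as done elsewhere in dynaphopy:
#     arranged_ev, freqs = obtain_eigenvectors_and_frequencies(structure,
#                                                              [0.5, 0.0, 0.0])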
def obtain_phonopy_dos(structure, mesh=(40, 40, 40), force_constants=None,
freq_min=None, freq_max=None, projected_on_atom=-1, NAC=False):
if force_constants is None:
phonon = get_phonon(structure,
setup_forces=True,
custom_supercell=None,
NAC=NAC)
else:
phonon = get_phonon(structure,
setup_forces=False,
custom_supercell=force_constants.get_supercell(),
NAC=NAC)
phonon.set_force_constants(force_constants.get_array())
if projected_on_atom < 0:
phonon.run_mesh(mesh)
phonon.run_total_dos(freq_min=freq_min, freq_max=freq_max, use_tetrahedron_method=True)
total_dos = np.array([phonon.get_total_dos_dict()['frequency_points'],
phonon.get_total_dos_dict()['total_dos']])
else:
phonon.run_mesh(mesh, with_eigenvectors=True, is_mesh_symmetry=False)
phonon.run_projected_dos(freq_min=freq_min, freq_max=freq_max)
if projected_on_atom >= len(phonon.get_projected_dos_dict()['projected_dos']):
print('No atom type {0}'.format(projected_on_atom))
exit()
# total_dos = np.array([phonon.get_partial_DOS()[0], phonon.get_partial_DOS()[1][projected_on_atom]])
total_dos = np.array([phonon.get_projected_dos_dict()['frequency_points'],
phonon.get_projected_dos_dict()['projected_dos'][projected_on_atom]])
    # Normalize to unit cell
total_dos[1, :] *= float(structure.get_number_of_atoms())/structure.get_number_of_primitive_atoms()
return total_dos
def obtain_phonopy_thermal_properties(structure, temperature, mesh=(40, 40, 40), force_constants=None, NAC=False):
if force_constants is None:
phonon = get_phonon(structure,
setup_forces=True,
custom_supercell=None,
NAC=NAC)
else:
phonon = get_phonon(structure,
setup_forces=False,
custom_supercell=force_constants.get_supercell(),
NAC=NAC)
phonon.set_force_constants(force_constants.get_array())
phonon.run_mesh(mesh)
phonon.run_thermal_properties(t_step=1, t_min=temperature, t_max=temperature)
# t, free_energy, entropy, cv = np.array(phonon.get_thermal_properties()).T[0]
thermal_dict = phonon.get_thermal_properties_dict()
free_energy = thermal_dict['free_energy']
entropy = thermal_dict['entropy']
cv = thermal_dict['heat_capacity']
# Normalize to unit cell
unit_cell_relation = float(structure.get_number_of_atoms())/structure.get_number_of_primitive_atoms()
free_energy *= unit_cell_relation
entropy *= unit_cell_relation
cv *= unit_cell_relation
return free_energy, entropy, cv
def obtain_phonopy_mesh_from_force_constants(structure, force_constants, mesh=(40, 40, 40), NAC=False):
phonon = get_phonon(structure,
setup_forces=False,
custom_supercell=force_constants.get_supercell(),
NAC=NAC)
phonon.set_force_constants(force_constants.get_array())
phonon.run_mesh(mesh)
mesh_dict = phonon.get_mesh_dict()
return mesh_dict['qpoints'], mesh_dict['weights'], mesh_dict['frequencies']
def obtain_phonon_dispersion_bands(structure, bands_ranges, force_constants=None,
NAC=False, band_resolution=30, band_connection=False):
if force_constants is not None:
# print('Getting renormalized phonon dispersion relations')
phonon = get_phonon(structure, NAC=NAC, setup_forces=False,
custom_supercell=force_constants.get_supercell())
phonon.set_force_constants(force_constants.get_array())
else:
# print('Getting phonon dispersion relations')
phonon = get_phonon(structure, NAC=NAC)
    bands = []
for q_start, q_end in bands_ranges:
band = []
for i in range(band_resolution+1):
band.append(np.array(q_start) + (np.array(q_end) - np.array(q_start)) / band_resolution * i)
bands.append(band)
phonon.run_band_structure(bands, is_band_connection=band_connection, with_eigenvectors=True)
bands_dict = phonon.get_band_structure_dict()
return (bands_dict['qpoints'],
bands_dict['distances'],
bands_dict['frequencies'],
bands_dict['eigenvectors'])
def get_commensurate_points(structure, fc_supercell):
phonon = get_phonon(structure, setup_forces=False, custom_supercell=fc_supercell)
primitive = phonon.get_primitive()
supercell = phonon.get_supercell()
dynmat2fc = DynmatToForceConstants(primitive, supercell)
com_points = dynmat2fc.get_commensurate_points()
return com_points
def get_equivalent_q_points_by_symmetry(q_point, structure, symprec=1e-5):
bulk = PhonopyAtoms(symbols=structure.get_atomic_elements(),
scaled_positions=structure.get_scaled_positions(),
cell=structure.get_cell())
tot_points = [list(q_point)]
for operation_matrix in Symmetry(bulk, symprec=symprec).get_reciprocal_operations():
operation_matrix_q = np.dot(np.linalg.inv(structure.get_primitive_matrix()), operation_matrix.T)
operation_matrix_q = np.dot(operation_matrix_q, structure.get_primitive_matrix())
q_point_test = np.dot(q_point, operation_matrix_q)
if (q_point_test >= 0).all():
tot_points.append(list(q_point_test))
tot_points_unique = [list(x) for x in set(tuple(x) for x in tot_points)]
return tot_points_unique
def get_renormalized_force_constants(renormalized_frequencies, eigenvectors, structure, fc_supercell, symmetrize=False):
phonon = get_phonon(structure, setup_forces=False, custom_supercell=fc_supercell)
primitive = phonon.get_primitive()
supercell = phonon.get_supercell()
dynmat2fc = DynmatToForceConstants(primitive, supercell)
size = structure.get_number_of_dimensions() * structure.get_number_of_primitive_atoms()
eigenvectors = np.array([eigenvector.reshape(size, size, order='C').T for eigenvector in eigenvectors ])
renormalized_frequencies = np.array(renormalized_frequencies)
try:
dynmat2fc.set_dynamical_matrices(renormalized_frequencies / VaspToTHz, eigenvectors)
except TypeError:
frequencies_thz = renormalized_frequencies / VaspToTHz
eigenvalues = frequencies_thz ** 2 * np.sign(frequencies_thz)
dynmat2fc.create_dynamical_matrices(eigenvalues=eigenvalues,
eigenvectors=eigenvectors)
dynmat2fc.run()
force_constants = ForceConstants(dynmat2fc.get_force_constants(), supercell=fc_supercell)
# Symmetrize force constants using crystal symmetry
if symmetrize:
print('Symmetrizing force constants')
set_tensor_symmetry_PJ(force_constants.get_array(),
phonon.supercell.get_cell(),
phonon.supercell.get_scaled_positions(),
phonon.symmetry)
return force_constants
if __name__ == "__main__":
import dynaphopy.interface.iofile as reading
input_parameters = reading.read_parameters_from_input_file('/home/abel/VASP/Ag2Cu2O4/MD/input_dynaphopy')
structure = reading.read_from_file_structure_poscar(input_parameters['structure_file_name_poscar'])
structure.set_primitive_matrix(input_parameters['_primitive_matrix'])
# structure.set_supercell_phonon(input_parameters['_supercell_phonon'])
structure.set_force_set(get_force_sets_from_file(file_name=input_parameters['force_constants_file_name']))
obtain_phonopy_dos(structure)
| mit | 1,482,515,654,496,575,000 | 38.131965 | 120 | 0.654152 | false |
AnnieJumpCannon/RAVE | article/figures/plot_bensby.py | 1 | 1775 |
"""
Make label comparisons with Bensby et al. (2014).
"""
import numpy as np
import matplotlib.pyplot as plt
try:
bensby
except NameError: # Do you know who I am?
from rave_io import get_cannon_dr1, get_literature_bensby
rave_cannon_dr1 = get_cannon_dr1()
#OK = (data["SNRK"] > 10) * (data["R_CHI_SQ"] < 3) * (data["R"] > 25)
#rave_cannon_dr1 = rave_cannon_dr1[OK]
bensby = get_literature_bensby()
from astropy.table import join
data = join(rave_cannon_dr1, bensby, keys=("Name", ))
ok = (data["SNRK"] > 10) * (data["R_CHI_SQ"] < 3) * (data["R"] > 25)
data = data[ok].filled()
else:
print("Using pre-loaded data!")
latex_labels = {
"TEFF": r"$T_{\rm eff}$",
"LOGG": r"$\log{g}$",
"FE_H": r"$[{\rm Fe/H}]$"
}
def scatter_comparison(axis, bensby_label_name, label_name, c=None):
"""
Show a scatter plot on the given axis.
"""
x = data[bensby_label_name]
y = data[label_name]
    if c is None:
        c = data["Teff"]
axis.scatter(x, y, c=c)
    limits = np.array([axis.get_xlim(), axis.get_ylim()]).flatten()
limits = [np.min(limits), np.max(limits)]
axis.plot(limits, limits, c="#666666", zorder=-1, linestyle=":")
axis.set_xlim(limits)
axis.set_ylim(limits)
diff = y - x
print(label_name, np.nanmean(diff), np.nanstd(diff))
axis.set_xlabel(" ".join([latex_labels[label_name], r"$({\rm Bensby}+$ $2014)$"]))
axis.set_ylabel(" ".join([latex_labels[label_name], r"$({\rm unRAVE})$"]))
# Compare teff, logg, [Fe/H]
fig, axes = plt.subplots(1, 3)
labels = [
("TEFF", "Teff"),
("LOGG", "logg"),
("FE_H", "Fe_H")
]
for ax, (cannon_label, bensby_label) in zip(axes, labels):
scatter_comparison(ax, bensby_label, cannon_label)
# Compare abundances.
| mit | -8,901,556,318,300,327,000 | 21.468354 | 86 | 0.585352 | false |
mdpiper/topoflow | topoflow/components/met_base.py | 1 | 110618 |
## Does "land_surface_air__latent_heat_flux" make sense? (2/5/13)
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Sep 2014. Fixed sign error in update_bulk_richardson_number().
# Ability to compute separate P_snow and P_rain.
# Aug 2014. New CSDMS Standard Names and clean up.
# Nov 2013. Converted TopoFlow to a Python package.
#
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names (version 0.7.9) and BMI.
# May 2012. P is now a 1D array with one element and mutable,
# so any comp with ref to it can see it change.
# Jun 2010. update_net_shortwave_radiation(), etc.
# May 2010. Changes to initialize() and read_cfg_file().
# Aug 2009
# Jan 2009. Converted from IDL.
#
#-----------------------------------------------------------------------
# NOTES: This file defines a "base class" for meteorology
# components as well as any functions used by most or
# all meteorology methods. The methods of this class
# should be over-ridden as necessary for different
# methods of modeling meteorology.
#-----------------------------------------------------------------------
# Notes: Do we ever need to distinguish between a surface
# temperature and snow temperature (in the snow) ?
# Recall that a separate T_soil_x variable is used
# to compute Qc.
#
# Cp_snow is from NCAR CSM Flux Coupler web page
#
# rho_H2O is currently not adjustable with GUI. (still true?)
#
#-----------------------------------------------------------------------
#
# class met_component (inherits from BMI_base.py)
#
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/15/12)
# get_output_var_names() # (5/15/12)
# get_var_name() # (5/15/12)
# get_var_units() # (5/15/12)
# ---------------------
# set_constants()
# initialize()
# update()
# finalize()
# ----------------------------
# set_computed_input_vars()
# initialize_computed_vars()
# ----------------------------
# update_P_integral()
# update_P_max()
# update_P_rain() # (9/14/14, new method)
# update_P_snow() # (9/14/14, new method)
# ------------------------------------
# update_bulk_richardson_number()
# update_bulk_aero_conductance()
# update_sensible_heat_flux()
# update_saturation_vapor_pressure()
# update_vapor_pressure()
# update_dew_point() # (7/6/10)
# update_precipitable_water_content() # (7/6/10)
# ------------------------------------
# update_latent_heat_flux()
# update_conduction_heat_flux()
# update_advection_heat_flux()
# ------------------------------------
# update_julian_day() # (7/1/10)
# update_net_shortwave_radiation() # (7/1/10)
# update_em_air() # (7/1/10)
# update_net_longwave_radiation() # (7/1/10)
# update_net_energy_flux() # ("Q_sum")
# ------------------------------------
# open_input_files()
# read_input_files()
# close_input_files()
# ------------------------------------
# update_outfile_names()
# open_output_files()
# write_output_files()
# close_output_files()
# save_grids()
# save_pixel_values()
#
# Functions:
# compare_em_air_methods()
#
#-----------------------------------------------------------------------
import numpy as np
import os
from topoflow.components import solar_funcs as solar
from topoflow.utils import BMI_base
from topoflow.utils import model_input
from topoflow.utils import model_output
from topoflow.utils import rtg_files
#-----------------------------------------------------------------------
class met_component( BMI_base.BMI_component ):
#-------------------------------------------------------------------
_att_map = {
'model_name': 'TopoFlow_Meteorology',
'version': '3.1',
'author_name': 'Scott D. Peckham',
'grid_type': 'uniform',
'time_step_type': 'fixed',
'step_method': 'explicit',
#-------------------------------------------------------------
'comp_name': 'Meteorology',
'model_family': 'TopoFlow',
'cfg_template_file': 'Meteorology.cfg.in',
'cfg_extension': '_meteorology.cfg',
'cmt_var_prefix': '/Meteorology/Input/Var/',
'gui_xml_file': '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Meteorology.xml',
'dialog_title': 'Meteorology: Method 1 Parameters',
'time_units': 'seconds' }
#---------------------------------------------------------
# Note that SWE = "snow water equivalent", but it really
# just means "liquid_equivalent".
#---------------------------------------------------------
_input_var_names = [
'snowpack__z_mean_of_mass-per-volume_density', # rho_snow
'snowpack__depth', # h_snow
'snowpack__liquid-equivalent_depth', # h_swe
'snowpack__melt_volume_flux' ] # SM (MR used for ice?)
#-----------------------------------------------------------
# albedo, emissivity and transmittance are dimensionless.
#-----------------------------------------------------------
# "atmosphere_aerosol_dust__reduction_of_transmittance" vs.
# This TF parameter comes from Dingman, App. E, p. 604.
#-----------------------------------------------------------
# There is an Optical_Air_Mass function in solar_funcs.py.
# However, this quantity is not saved in comp state.
#
# "optical_path_length_ratio" vs. "optical_air_mass" OR
# "airmass_factor" OR "relative_airmass" OR
# "relative_optical_path_length"
#-----------------------------------------------------------
# Our term "liquid_equivalent_precipitation" is widely
# used on the Internet, with 374,000 Google hits.
#--------------------------------------------------------------
# Note: "bulk exchange coefficient" has 2460 Google hits.
# It is closely related to a "transfer coefficient"
# for mass, momentum or heat. There are no CF
# Standard Names with "bulk", "exchange" or "transfer".
#
# Zhang et al. (2000) use "bulk exchange coefficient" in a
# nonstandard way, with units of velocity vs. unitless.
#
# Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat [m/s]
# De = bulk exchange coeff for vapor [m/s]
#---------------------------------------------------------------
# Now this component uses T_air to break the liquid-equivalent
# precip rate into separate P_rain and P_snow components.
# P_rain is used by channel_base.update_R()
# P_snow is used by snow_base.update_depth()
#---------------------------------------------------------------
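    # A minimal sketch of that split, assuming a simple 0 deg C threshold on
    # T_air (illustrative only; the update_P_rain() and update_P_snow()
    # methods below implement the actual rule):
    #     P_rain = P * (T_air > 0)
    #     P_snow = P * (T_air <= 0)
    #---------------------------------------------------------------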
_output_var_names = [
# 'atmosphere__optical_path_length_ratio', # M_opt [1] (in solar_funcs.py)
# 'atmosphere__von_karman_constant', # kappa
'atmosphere_aerosol_dust__reduction_of_transmittance', # dust_atten ##### (from GUI)
'atmosphere_air-column_water-vapor__liquid-equivalent_depth', # W_p ("precipitable depth")
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor', # canopy_factor
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor', # cloud_factor
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance', # De [m s-1], latent
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance', # Dh [m s-1], sensible
'atmosphere_bottom_air__emissivity', # em_air
'atmosphere_bottom_air__mass-per-volume_density', # rho_air
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity', # Cp_air
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance', # Dn [m s-1], neutral
'atmosphere_bottom_air__pressure', # p0
'atmosphere_bottom_air__temperature', # T_air
'atmosphere_bottom_air_flow__bulk_richardson_number', # Ri [1]
'atmosphere_bottom_air_flow__log_law_roughness_length', # z0_air
'atmosphere_bottom_air_flow__reference-height_speed', # uz
'atmosphere_bottom_air_flow__speed_reference_height', # z
'atmosphere_bottom_air_land_net-latent-heat__energy_flux', # Qe [W m-2]
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux', # Qh [W m-2]
'atmosphere_bottom_air_water-vapor__dew_point_temperature', # T_dew
'atmosphere_bottom_air_water-vapor__partial_pressure', # e_air # (insert "reference_height" ??)
'atmosphere_bottom_air_water-vapor__relative_saturation', # RH
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure', # e_sat_air
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux', # vol_P
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux', # P_max
'atmosphere_water__precipitation_leq-volume_flux', # P [m s-1]
'atmosphere_water__rainfall_volume_flux', # P_rain [m s-1] (liquid)
'atmosphere_water__snowfall_leq-volume_flux', # P_snow [m s-1]
'earth__standard_gravity_constant', # g [m s-2]
'land_surface__albedo', # albedo
'land_surface__aspect_angle', # alpha (from GUI)
'land_surface__emissivity', # em_surf
'land_surface__latitude', # lat_deg [degrees]
'land_surface__longitude', # lon_deg [degrees]
'land_surface__slope_angle', # beta (from GUI)
'land_surface__temperature', # T_surf ### OR JUST "land__temperature"?
# 'land_surface_air__temperature', # T_air
'land_surface_air_water-vapor__partial_pressure', # e_surf # (insert "reference_height" ??)
'land_surface_air_water-vapor__saturated_partial_pressure', # e_sat_surf
'land_surface_net-longwave-radiation__energy_flux', # Qn_LW [W m-2]
'land_surface_net-shortwave-radiation__energy_flux', # Qn_SW [W m-2]
        'land_surface_net-total-energy__energy_flux',                     # Q_sum  [W m-2]
'model__time_step', # dt
'physics__stefan_boltzmann_constant', # sigma [W m-2 K-4]
'physics__von_karman_constant', # kappa [1]
'water__mass-specific_latent_fusion_heat', # Lf [J kg-1]
'water__mass-specific_latent_vaporization_heat', # Lv [J kg-1]
'water-liquid__mass-per-volume_density' ] # rho_H2O
#-----------------------------------------
# These are used only in solar_funcs.py
# Later, create a Radiation component.
#---------------------------------------------
# Should we allow "day" as a base quantity ?
# "day_length" is confusing. Think about "date" also.
# Maybe something like:
#
# "earth__mean_solar_rotation_period"
# "earth__sidereal_rotation_period"
# "earth__stellar_rotation_period" (relative to "fixed stars")
# maybe: "earth__complete_rotation_period" ??
#
# OR:
# "earth_mean_solar_day__duration"
# "earth_sidereal_day__duration"
# "earth_stellar_day__duration"
#
# OR perhaps:
# "earth_mean_solar_day__rotation_period"
# "earth_sidereal_day__rotation_period"
# "earth_stellar_day__rotation_period"
#
# "stellar rotation period" gives 84,500 Google hits.
# "solar_rotation_period" gives 41,100 Google hits.
# "sidereal_roation_period" gives 86,000 Google hits.
# "stellar day" gives 136,000 Google hits (but many unrelated).
#
# NB! "stellar_rotation_period" is ambiguous since it is also
# used for the rotation period of a star.
#
# "earth_mean_solar_day__hour_count" ("standard_day" ?)
# "earth_sidereal_day__hour_count"
# "earth_sidereal_day__duration"
# "earth__rotation_period" = "sidereal_day"
#
# "earth_stellar_day__period" ??
# "earth_stellar_day__duration" ??
#
#------------------------------------------------------------------
# For "earth__rotation_rate", it seems this should be based on
# the sidereal day (23.93 hours) instead of the mean solar day.
#------------------------------------------------------------------
# There are at least a few online sources that use both terms:
# "equivalent latitude" and "equivalent longitude". See:
# "The Construction and Application of a Martian Snowpack Model".
#------------------------------------------------------------------
# Adopt the little-used term: "topographic_sunrise" ?
# Or maybe "illuminated_topography", or "local_sunrise" ??
#------------------------------------------------------------------
# For angle relations between the earth and the sun, should we
# just use the adjective "solar" in the quantity name or include
# sun in the object name? We could also use terms like:
# earth_to_sun__declination_angle
# earth_to_sun__right_ascension_angle
#
#------------------------------------------------------------------
# The adjective "local" in "earth_local_apparent_noon__time"
# may be helpful in other contexts such as:
# 'earth__local_longitude' and 'land_surface__local_elevation'.
#------------------------------------------------------------------
# 'earth__autumnal_equinox_date',
# 'earth__autumnal_equinox_time',
# 'earth_axis__ecliptic_tilt_angle', # tilt_angle
# 'earth__julian_day_number', ########
# 'earth__julian_day_angle',
# 'earth__local_apparent_noon_time'
# 'earth__mean_radius',
# 'earth__mean_solar_day_duration', # (exactly 24 hours)
# 'earth_orbit__eccentricity',
# 'earth_orbit__period', # (one year)
# 'earth__perihelion_julian_day', ######
# 'earth__rotation_period', ######
# 'earth__rotation_rate', # Omega ###### What about Angular Velocity ?
# 'earth__sidereal_day_duration', # (one rotation = 23.934470 hours)
# 'earth__solar_declination_angle',
# 'earth__solar_hour_angle',
# 'earth__solar_irradiation_constant', ## (or "insolation_constant" ??)
# 'earth__solar_right_ascension_angle',
# 'earth__solar_vertical_angle', (complement of zenith angle)
# 'earth__solar_zenith_angle',
# 'earth__stellar_day_duration', # (relative to the "fixed stars")
# 'earth__summer_solstice_date',
# 'earth__summer_solstice_time',
# 'earth__topographic_sunrise_equivalent_latitude',
# 'earth__topographic_sunrise_equivalent_longitude', (flat_lon + offset)
# 'earth__topographic_sunrise_equivalent_longitude_offset',
# 'earth__topographic_sunrise_time',
# 'earth__topographic_sunset_time',
# 'earth_true_solar_noon___time', #####
# 'earth_clock__true_solar_noon_time'
# 'earth__vernal_equinox_date',
# 'earth__vernal_equinox_time',
# 'earth__winter_solstice_date',
# 'earth__winter_solstice_time',
#
# What about a "slope_corrected" or "topographic" version of K_dir ?
#
# 'land_surface__backscattered_shortwave_irradiation_flux', # K_bs
# 'land_surface__diffuse_shortwave_irradiation_flux', # K_dif
# 'land_surface__direct_shortwave_irradiation_flux', # K_dir
# 'land_surface__global_shortwave_irradiation_flux', # K_glob = K_dif + K_dir
#------------------------------------------------------------------
#------------------------------------------------------------------
# Maybe we should rename "z" to "z_ref" and "uz" to "uz_ref" ?
#------------------------------------------------------------------
_var_name_map = {
'snowpack__z_mean_of_mass-per-volume_density': 'rho_snow',
'snowpack__depth': 'h_snow',
'snowpack__liquid-equivalent_depth': 'h_swe',
'snowpack__melt_volume_flux': 'SM', # (MR is used for ice)
#-----------------------------------------------------------------
#'atmosphere__optical_path_length_ratio': 'M_opt', # (in solar_funcs.py)
# 'atmosphere__von_karman_constant': 'kappa',
'atmosphere_aerosol_dust__reduction_of_transmittance': 'dust_atten',
'atmosphere_air-column_water-vapor__liquid-equivalent_depth': 'W_p', #########
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor': 'canopy_factor',
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor': 'cloud_factor',
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance': 'De',
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance': 'Dh',
'atmosphere_bottom_air__emissivity': 'em_air',
'atmosphere_bottom_air__mass-per-volume_density': 'rho_air',
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity': 'Cp_air',
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance': 'Dn',
'atmosphere_bottom_air__pressure': 'p0',
'atmosphere_bottom_air__temperature': 'T_air',
'atmosphere_bottom_air_flow__bulk_richardson_number': 'Ri',
'atmosphere_bottom_air_flow__log_law_roughness_length': 'z0_air', ## (not "z0")
'atmosphere_bottom_air_flow__reference-height_speed': 'uz',
'atmosphere_bottom_air_flow__speed_reference_height': 'z',
'atmosphere_bottom_air_land_net-latent-heat__energy_flux': 'Qe',
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux': 'Qh',
'atmosphere_bottom_air_water-vapor__dew_point_temperature': 'T_dew',
'atmosphere_bottom_air_water-vapor__partial_pressure': 'e_air',
'atmosphere_bottom_air_water-vapor__relative_saturation': 'RH',
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure': 'e_sat_air',
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux': 'vol_P',
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux': 'P_max',
'atmosphere_water__precipitation_leq-volume_flux': 'P',
'atmosphere_water__rainfall_volume_flux': 'P_rain',
'atmosphere_water__snowfall_leq-volume_flux': 'P_snow',
'earth__standard_gravity_constant': 'g',
'land_surface__albedo': 'albedo',
'land_surface__aspect_angle': 'alpha',
'land_surface__emissivity': 'em_surf',
'land_surface__latitude': 'lat_deg',
'land_surface__longitude': 'lon_deg',
'land_surface__slope_angle': 'beta',
'land_surface__temperature': 'T_surf',
# 'land_surface_air__temperature': 'T_surf',
'land_surface_air_water-vapor__partial_pressure': 'e_surf',
'land_surface_air_water-vapor__saturated_partial_pressure': 'e_sat_surf',
'land_surface_net-longwave-radiation__energy_flux': 'Qn_LW',
'land_surface_net-shortwave-radiation__energy_flux': 'Qn_SW',
'land_surface_net-total-energy__energy_flux': 'Q_sum',
'model__time_step': 'dt',
'physics__stefan_boltzmann_constant': 'sigma',
'physics__von_karman_constant': 'kappa',
'water__mass-specific_latent_fusion_heat': 'Lf',
'water__mass-specific_latent_vaporization_heat': 'Lv',
'water-liquid__mass-per-volume_density': 'rho_H2O' }
#-----------------------------------------------------------------
# Note: The "update()" function calls several functions with the
# MBAR keyword set to get units of "mbar" vs. "kPa".
#-----------------------------------------------------------------
# Note: We need to be careful with whether units are C or K,
# for all "thermal" quantities (e.g. thermal_capacity).
#-----------------------------------------------------------------
# Note: ARHYTHM had 3 "bulk exchange coefficients" that are all
# equal and therefore have the same units of [m s-1].
# Double-check that this is what is intended. ##########
#-----------------------------------------------------------------
# Note: "atmosphere_column_water__liquid_equivalent_depth" has
# units of "cm", as in Dingman's book. Make sure it gets
# used correctly in equations.
#-----------------------------------------------------------------
# Note: slope_angle and aspect_angle have units of RADIANS.
# aspect_angle is measured CW from north.
# RT files ending in "_mf-angle.rtg" and "fd-aspect.rtg"
# contain aspect values. The former are in [0, 2 Pi]
# while the latter are in [-Pi, Pi] and both measure
# CCW from due east. They are converted for use here.
#-----------------------------------------------------------------
_var_units_map = {
'snowpack__z_mean_of_mass-per-volume_density': 'kg m-3',
'snowpack__depth': 'm',
'snowpack__liquid-equivalent_depth': 'm',
'snowpack__melt_volume_flux': 'm s-1',
#-------------------------------------------------------------
# 'atmosphere__optical_path_length_ratio': '1',
# 'atmosphere__von_karman_constant': '1',
'atmosphere_aerosol_dust__reduction_of_transmittance': '1',
'atmosphere_air-column_water-vapor__liquid-equivalent_depth': 'cm', # (see Notes above)
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor': '1',
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor': '1',
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__emissivity': '1',
'atmosphere_bottom_air__mass-per-volume_density': 'kg m-3',
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity': 'J kg-1 K-1', # (see Notes above)
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__pressure': 'mbar',
'atmosphere_bottom_air__temperature': 'deg_C', # (see Notes above)
'atmosphere_bottom_air_flow__bulk_richardson_number': '1',
'atmosphere_bottom_air_flow__log_law_roughness_length': 'm',
'atmosphere_bottom_air_flow__reference-height_speed': 'm s-1',
'atmosphere_bottom_air_flow__speed_reference_height': 'm',
'atmosphere_bottom_air_land_net-latent-heat__energy_flux': 'W m-2',
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux': 'W m-2',
'atmosphere_bottom_air_water-vapor__dew_point_temperature': 'deg_C',
'atmosphere_bottom_air_water-vapor__partial_pressure': 'mbar', # (see Notes above)
'atmosphere_bottom_air_water-vapor__relative_saturation': '1',
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure': 'mbar', # (see Notes above)
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux': 'm3',
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux': 'm s-1',
'atmosphere_water__precipitation_leq-volume_flux': 'm s-1',
'atmosphere_water__rainfall_volume_flux': 'm s-1', # (see Notes above)
'atmosphere_water__snowfall_leq-volume_flux': 'm s-1', # (see Notes above)
'earth__standard_gravity_constant': 'm s-2',
'land_surface__albedo': '1',
'land_surface__aspect_angle': 'radians', # (see Notes above)
'land_surface__emissivity': '1',
'land_surface__latitude': 'degrees',
'land_surface__longitude': 'degrees',
'land_surface__slope_angle': 'radians',
'land_surface__temperature': 'deg_C',
# 'land_surface_air__temperature': 'deg_C',
'land_surface_air_water-vapor__partial_pressure': 'mbar',
'land_surface_air_water-vapor__saturated_partial_pressure': 'mbar',
'land_surface_net-longwave-radiation__energy_flux': 'W m-2',
'land_surface_net-shortwave-radiation__energy_flux': 'W m-2',
'land_surface_net-total-energy__energy_flux': 'W m-2',
'model__time_step': 's',
'physics__stefan_boltzmann_constant': 'W m-2 K-4',
'physics__von_karman_constant': '1',
'water__mass-specific_latent_fusion_heat': 'J kg-1',
'water__mass-specific_latent_vaporization_heat': 'J kg-1',
'water-liquid__mass-per-volume_density': 'kg m-3' }
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
def get_attribute(self, att_name):
try:
return self._att_map[ att_name.lower() ]
except:
print '###################################################'
print ' ERROR: Could not find attribute: ' + att_name
print '###################################################'
print ' '
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
## def get_var_type(self, long_var_name):
##
## #---------------------------------------
## # So far, all vars have type "double",
## # but use the one in BMI_base instead.
## #---------------------------------------
## return 'float64'
##
## # get_var_type()
#-------------------------------------------------------------------
def set_constants(self):
#---------------------------------
# Define some physical constants
#---------------------------------
self.g = np.float64(9.81) # [m s-2, gravity]
self.kappa = np.float64(0.408) # [1] (von Karman)
self.rho_H2O = np.float64(1000) # [kg m-3]
self.rho_air = np.float64(1.2614) # [kg m-3]
self.Cp_air = np.float64(1005.7) # [J kg-1 K-1]
self.Lv = np.float64(2500000) # [J kg-1] Latent heat of vaporiz.
self.Lf = np.float64(334000) # [J kg-1 = W s kg-1], Latent heat of fusion
self.sigma = np.float64(5.67E-8) # [W m-2 K-4] (Stefan-Boltzman constant)
self.C_to_K = np.float64(273.15) # (add to convert deg C to K)
self.twopi = np.float64(2) * np.pi
self.one_seventh = np.float64(1) / 7
self.hours_per_day = np.float64(24)
self.secs_per_day = np.float64(3600) * self.hours_per_day
#---------------------------
# See update_latent_heat()
#-----------------------------------------------------------
# According to Dingman (2002, p. 273), constant should
# be 0.622 instead of 0.662 (Zhang et al., 2000, p. 1002).
# Is this constant actually the dimensionless ratio of
# the molecular weight of water to that of dry air ?
#-----------------------------------------------------------
## self.latent_heat_constant = np.float64(0.622)
self.latent_heat_constant = np.float64(0.662)
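        #-----------------------------------------------------------
        # For reference: 18.016 / 28.966 = 0.622, i.e. the ratio of
        # the molar mass of water to that of dry air, which is why
        # Dingman's value of 0.622 is the one usually quoted.
        #-----------------------------------------------------------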
#----------------------------------------
# Constants related to precip (9/24/09)
#----------------------------------------
self.mmph_to_mps = (np.float64(1) / np.float64(3600000))
self.mps_to_mmph = np.float64(3600000)
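        # (1 mm h-1 = 0.001 m / 3600 s = 1 / 3,600,000 m s-1, hence
        #  the conversion factors above.)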
self.forever = np.float64(999999999) # [minutes]
#------------------------------------------------
# Only needed for method 1, where all rates and
# durations are read as 1D arrays from GUI.
# Method 1 may be removed in a future version.
#------------------------------------------------
## self.method1_rates = None
## self.method1_durations = None
## self.method1_n_rates = 0
# set_constants()
#-------------------------------------------------------------------
def initialize(self, cfg_file=None, mode="nondriver",
SILENT=False):
if not(SILENT):
print ' '
print 'Meteorology component: Initializing...'
self.status = 'initializing' # (OpenMI 2.0 convention)
self.mode = mode
self.cfg_file = cfg_file
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants()
self.initialize_config_vars()
## print ' Calling read_grid_info()...'
self.read_grid_info()
## print ' Calling initialize_basin_vars()...'
self.initialize_basin_vars() # (5/14/10)
#----------------------------------------------------
# NB! This read_input_files() uses self.time_index.
# Also needs to be before "Disabled" test.
#----------------------------------------------------
## print ' Calling initialize_time_vars()...'
self.initialize_time_vars()
#-------------------------------------------------
# (5/19/12) This makes P "mutable", which allows
# its updated values to be seen by any component
# that has a reference to it.
#-------------------------------------------------
# Write a "initialize_computed_vars()" method?
#-------------------------------------------------
# self.P = self.initialize_scalar(0, dtype='float64') # @mdpiper
# self.P_rain = self.initialize_scalar(0, dtype='float64')
# self.P_snow = self.initialize_scalar(0, dtype='float64')
self.P = np.zeros((self.ny,self.nx), dtype=float)
self.P_rain = np.zeros((self.ny,self.nx), dtype=float)
self.P_snow = np.zeros((self.ny,self.nx), dtype=float)
#------------------------------------------------------
# NB! "Sample steps" must be defined before we return
# Check all other process modules.
#------------------------------------------------------
if (self.comp_status == 'Disabled'):
if not(SILENT):
print 'Meteorology component: Disabled.'
## self.P = np.float64(0)
self.e_air = self.initialize_scalar(0, dtype='float64')
self.e_surf = self.initialize_scalar(0, dtype='float64')
self.em_air = self.initialize_scalar(0, dtype='float64')
self.Qn_SW = self.initialize_scalar(0, dtype='float64')
self.Qn_LW = self.initialize_scalar(0, dtype='float64')
self.Q_sum = self.initialize_scalar(0, dtype='float64')
self.Qc = self.initialize_scalar(0, dtype='float64')
self.Qa = self.initialize_scalar(0, dtype='float64')
self.DONE = True
self.status = 'initialized'
return
#-----------------------------------------------
# Read from files as needed to initialize vars
#-----------------------------------------------
self.open_input_files()
self.read_input_files() # (initializes P)
# Some output variables aren't defined until update() is called.
# Initialize them here, instead. (@mdpiper, 9/8/15)
try:
self.Ri
except AttributeError:
self.Ri = np.zeros_like(self.T_air)
## self.check_input_types() # (not needed so far)
#-----------------------
# Initialize variables
#-----------------------
## print ' Calling initialize_computed_vars()...'
self.initialize_computed_vars() # (after read_input_files)
if not(self.PRECIP_ONLY):
self.open_output_files()
self.status = 'initialized' # (OpenMI 2.0 convention)
# initialize()
#-------------------------------------------------------------------
## def update(self, dt=-1.0, time_seconds=None):
def update(self, dt=-1.0):
#----------------------------------------------------------
# Note: The read_input_files() method is first called by
# the initialize() method. Then, the update()
# method is called one or more times, and it calls
# other update_*() methods to compute additional
# variables using input data that was last read.
# Based on this pattern, read_input_files() should
# be called at end of update() method as done here.
# If the input files don't contain any additional
# data, the last data read persists by default.
#----------------------------------------------------------
if (self.comp_status == 'Disabled'): return
self.status = 'updating' # (OpenMI 2.0 convention)
#-------------------------------------------
# Update computed values related to precip
#-------------------------------------------
self.update_P_integral()
self.update_P_max()
self.update_P_rain()
self.update_P_snow()
#-------------------------
# Update computed values
#-------------------------
if not(self.PRECIP_ONLY):
self.update_bulk_richardson_number()
self.update_bulk_aero_conductance()
self.update_sensible_heat_flux()
self.update_saturation_vapor_pressure(MBAR=True)
self.update_saturation_vapor_pressure(MBAR=True, SURFACE=True) ########
self.update_vapor_pressure(MBAR=True)
self.update_dew_point() ###
self.update_precipitable_water_content() ###
self.update_vapor_pressure(MBAR=True, SURFACE=True) ########
self.update_latent_heat_flux() # (uses e_air and e_surf)
self.update_conduction_heat_flux()
self.update_advection_heat_flux()
self.update_julian_day()
self.update_net_shortwave_radiation()
self.update_em_air()
self.update_net_longwave_radiation()
self.update_net_energy_flux() # (at the end)
#----------------------------------------
# Read next met vars from input files ?
#-------------------------------------------
# Note that read_input_files() is called
# by initialize() and these values must be
# used for "update" calls before reading
# new ones.
#-------------------------------------------
if (self.time_index > 0):
self.read_input_files()
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
# Components use own self.time_sec by default.
#-----------------------------------------------
if not(self.PRECIP_ONLY):
self.write_output_files()
## self.write_output_files( time_seconds )
#-----------------------------
# Update internal clock
# after write_output_files()
#-----------------------------
self.update_time( dt )
self.status = 'updated' # (OpenMI)
# update()
#-------------------------------------------------------------------
def finalize(self):
self.status = 'finalizing' # (OpenMI)
if (self.comp_status == 'Enabled'):
self.close_input_files() ## TopoFlow input "data streams"
if not(self.PRECIP_ONLY):
self.close_output_files()
self.status = 'finalized' # (OpenMI)
self.print_final_report(comp_name='Meteorology component')
#---------------------------
# Release all of the ports
#----------------------------------------
# Make this call in "finalize()" method
# of the component's CCA Imple file
#----------------------------------------
# self.release_cca_ports( port_names, d_services )
# finalize()
#-------------------------------------------------------------------
def set_computed_input_vars(self):
#-----------------------------------------------
# Convert precip rate units from mm/h to m/s ?
#-----------------------------------------------
# NB! read_input_files() does this for files.
#-----------------------------------------------
if (self.P_type == 'Scalar'):
## print '######## self.P_type =', self.P_type
## print '######## type(self.P) =', type(self.P)
## print '######## self.P =', self.P
## print '######## Converting scalar P from MMPH to MPS.'
#-----------------------------------------------------
# (2/7/13) Must use "*=" here to preserve reference.
#-----------------------------------------------------
self.P *= self.mmph_to_mps
## self.P = self.P * self.mmph_to_mps
            print 'Scalar rainrate set to:', self.P * self.mps_to_mmph, ' [mmph]'
#---------------------------------
# Process the PRECIP_ONLY toggle
#---------------------------------
if not(hasattr(self, 'PRECIP_ONLY')):
self.PRECIP_ONLY = False
elif (self.PRECIP_ONLY.lower() == 'yes'):
self.PRECIP_ONLY = True
else:
self.PRECIP_ONLY = False
#---------------------------------------
# Print info message about PRECIP_ONLY
#---------------------------------------
if (self.PRECIP_ONLY):
print '-----------------------------------------'
print ' NOTE: Since PRECIP_ONLY = True, output'
print ' variables will not be computed'
print ' or saved to files.'
print '-----------------------------------------'
            print ' '
#----------------------------------------------------
# Toggle to use SATTERLUND or BRUTSAERT methods
# for computing e_air and em_air. (Not in GUI yet.)
#----------------------------------------------------
if not(hasattr(self, 'SATTERLUND')):
self.SATTERLUND = False
#---------------------------------------------
# Convert GMT_offset from string to int
# because GUI can't use ints in droplist yet
#---------------------------------------------
self.GMT_offset = np.int16( self.GMT_offset )
#------------------------------------------------
# Convert start_month from string to integer
# January should be 1. See solar.Julian_Day().
#------------------------------------------------
month_list = ['January', 'February', 'March', 'April',
'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
self.start_month = month_list.index( self.start_month ) + 1
#-------------------------------
# Initialize some more toggles
#-------------------------------
if not(hasattr(self, 'SAVE_QSW_GRIDS')):
self.SAVE_QSW_GRIDS = False
if not(hasattr(self, 'SAVE_QLW_GRIDS')):
self.SAVE_QLW_GRIDS = False
#-------------------------------------------
if not(hasattr(self, 'SAVE_QSW_PIXELS')):
self.SAVE_QSW_PIXELS = False
if not(hasattr(self, 'SAVE_QLW_PIXELS')):
self.SAVE_QLW_PIXELS = False
#---------------------------------------------------------
# Make sure that all "save_dts" are larger or equal to
# the specified process dt. There is no point in saving
# results more often than they change.
# Issue a message to this effect if any are smaller ??
#---------------------------------------------------------
self.save_grid_dt = np.maximum(self.save_grid_dt, self.dt)
self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt)
# set_computed_input_vars()
#-------------------------------------------------------------------
def initialize_computed_vars(self):
#------------------------------------------------------
# Note: Some of these require "self.rti", which is
# only stored by read_grid_info() after the
# set_computed_input_vars() function is called.
# So these parts can't go there.
#------------------------------------------------------
#---------------------------------------
# Add self.in_directory to:
# slope_grid_file & aspect_grid_file
#---------------------------------------
self.slope_grid_file = (self.in_directory + self.slope_grid_file)
self.aspect_grid_file = (self.in_directory + self.aspect_grid_file)
#-------------------------------------------------
# Read slope grid & convert to slope angle, beta
# NB! RT slope grids have NaNs on edges.
#-------------------------------------------------
slopes = rtg_files.read_grid( self.slope_grid_file, self.rti,
RTG_type='FLOAT' )
beta = np.arctan( slopes )
beta = (self.twopi + beta) % self.twopi
#---------------------------------------------
w_nan = np.where( np.logical_not(np.isfinite(beta)) )
n_nan = np.size(w_nan[0])
if (n_nan != 0):
beta[ w_nan ] = np.float64(0)
#------------------------------------------------------------------
w_bad = np.where( np.logical_or( (beta < 0), (beta > np.pi / 2) ) )
n_bad = np.size(w_bad[0])
if (n_bad != 0):
            msg = ['ERROR: Some slope angles are out of range.', ' ']
for line in msg:
print line
## result = GUI_Message(msg, INFO=True, TITLE='ERROR MESSAGE')
return
self.beta = beta ######
#------------------------------------------------------
# Read aspect grid. Alpha must be CW from north.
# NB! RT aspect grids have NaNs on edges.
#---------------------------------------------------------
# RT files ending in "_mf-angle.rtg" and "fd-aspect.rtg"
# contain aspect values. The former are in [0, 2 Pi]
# while the latter are in [-Pi, Pi] and both measure
# CCW from due east.
#---------------------------------------------------------
aspects = rtg_files.read_grid( self.aspect_grid_file, self.rti,
RTG_type='FLOAT' )
alpha = (np.pi / 2) - aspects
alpha = (self.twopi + alpha) % self.twopi
#-----------------------------------------------
w_nan = np.where( np.logical_not( np.isfinite(alpha) ) )
n_nan = np.size( w_nan[0] )
if (n_nan != 0):
alpha[ w_nan ] = np.float64(0)
self.alpha = alpha ######
#---------------------------
# Create lon and lat grids
#---------------------------
if (self.rti.pixel_geom == 0):
self.lon_deg = solar.Longitude_Grid( self.rti )
self.lat_deg = solar.Latitude_Grid( self.rti )
## print 'Lon grid ='
## print self.lon_deg
## print 'Lat grid ='
## print self.lat_deg
#-----------------------------
# Write grids to RTG files ?
#-----------------------------
## lon_file = (self.out_directory + self.site_prefix + '_lons.bin')
## rtg_files.write_grid( self.lon_deg, lon_file, self.rti )
## lat_file = (self.out_directory + self.site_prefix + '_lats.bin')
## rtg_files.write_grid( self.lat_deg, lat_file, self.rti )
else:
print 'SORRY: Cannot yet create lon and lat grids for'
print ' this DEM because it uses UTM coordinates.'
print ' Will use lat/lon for Denver, Colorado.'
print ' '
#--------------------------------------------
# For now, use scalar values for Denver, CO
#--------------------------------------------
self.lon_deg = np.float64( -104.9841667 )
self.lat_deg = np.float64( 39.7391667 )
## return
#-------------------------------------------------
# Initialize max precip rate with the first rate
#------------------------------------------------
# Note: Need this here because rate may be
# zero at the end of update_precip_rate()
#------------------------------------------------
# vol_P is used for mass balance check.
#------------------------------------------------
P_max = self.P.max() # (after read_input_files)
## self.P_max = self.P.max()
self.P_max = self.initialize_scalar( P_max, dtype='float64')
self.vol_P = self.initialize_scalar( 0, dtype='float64')
#----------------------------------------------------------
# For using new framework which embeds references from
# meteorology to snow, etc., these need to be defined
# in the initialize step. However, they will most likely
# change from scalar to grid during update, so we need to
# check that the reference isn't broken when the dtype
# changes. (5/17/12)
#----------------------------------------------------------
# These depend on grids alpha and beta, so will be grids.
#----------------------------------------------------------
self.Qn_SW = np.zeros([self.ny, self.nx], dtype='float64')
self.Qn_LW = np.zeros([self.ny, self.nx], dtype='float64')
self.Qn_tot = np.zeros([self.ny, self.nx], dtype='float64')
self.Q_sum = np.zeros([self.ny, self.nx], dtype='float64')
#----------------------------------------------------------
# self.Qn_SW = self.initialize_scalar( 0, dtype='float64')
# self.Qn_LW = self.initialize_scalar( 0, dtype='float64')
# self.Qn_tot = self.initialize_scalar( 0, dtype='float64')
# self.Q_sum = self.initialize_scalar( 0, dtype='float64')
#----------------------------------------------------------
# These may be scalars or grids.
#---------------------------------
self.Qe = self.initialize_scalar( 0, dtype='float64')
self.e_air = self.initialize_scalar( 0, dtype='float64')
self.e_surf = self.initialize_scalar( 0, dtype='float64')
self.em_air = self.initialize_scalar( 0, dtype='float64')
self.Qc = self.initialize_scalar( 0, dtype='float64')
self.Qa = self.initialize_scalar( 0, dtype='float64')
#------------------------------------
# Initialize the decimal Julian day
#------------------------------------
self.julian_day = solar.Julian_Day( self.start_month,
self.start_day,
self.start_hour )
## print ' julian_day =', self.julian_day
# initialize_computed_vars()
#-------------------------------------------------------------------
def update_P_integral(self):
#---------------------------------------------------
# Notes: This can be used for mass balance checks,
# such as now done by update_mass_totals()
# in topoflow.py. The "dt" here should be
# TopoFlow's "main dt" vs. the process dt.
# dV[i] = P[i] * da[i] * dt, dV = sum(dV[i])
#---------------------------------------------------
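        #--------------------------------------------------------
        # Illustrative check (assumed values): P = 10 mm/h =
        # 2.78e-6 m/s, da = 100 m^2, dt = 3600 s gives
        # dV = 2.78e-6 * 100 * 3600 = ~1.0 m^3 for that cell.
        #--------------------------------------------------------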
if (self.DEBUG):
print 'Calling update_P_integral()...'
#------------------------------------------------
# Update mass total for P, sum over all pixels
#------------------------------------------------
volume = np.double(self.P * self.da * self.dt) # [m^3]
if (np.size(volume) == 1):
self.vol_P += (volume * self.rti.n_pixels)
else:
self.vol_P += np.sum(volume)
# update_P_integral()
#-------------------------------------------------------------------
def update_P_max(self):
if (self.DEBUG):
print 'Calling update_P_max()...'
#-----------------------------------------
# Save the maximum precip. rate in [m/s]
#-------------------------------------------
# Must use "fill()" to preserve reference.
#-------------------------------------------
try:
self.P_max.fill( np.maximum(self.P_max, self.P.max()) )
except ValueError:
self.P_max[:, :] = np.maximum(self.P_max, self.P.max())
## self.P_max = np.maximum(self.P_max, self.P.max())
### print '##### P =', self.P
# update_P_max()
#-------------------------------------------------------------------
def update_P_rain(self):
#-----------------------------------------------------------
# Note: This routine is written so that it doesn't matter
# whether P and T_air are grids or scalars.
# For scalars: 1.5 * True = 1.5, 1.5 * False = 0.
# Here are the possible combinations for checking.
#-----------------------------------------------------------
# P T_air P_rain
#----------------------------
# scalar scalar scalar
# scalar grid grid
# grid scalar grid
# grid grid grid
#----------------------------
if (self.DEBUG):
print 'Calling update_P_rain()...'
#-------------------------------------------------
# P_rain is the precip that falls as liquid that
# can contribute to runoff production.
#-------------------------------------------------
# P_rain is used by channel_base.update_R.
#-------------------------------------------------
P_rain = self.P * (self.T_air > 0)
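        #---------------------------------------------------------
        # Example (assumed values): if T_air = -2 deg C (scalar),
        # then (T_air > 0) is False and P_rain = 0 everywhere; if
        # T_air is a grid, only cells above 0 deg C receive P.
        #---------------------------------------------------------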
if (np.ndim( self.P_rain ) == 0):
self.P_rain.fill( P_rain ) #### (mutable scalar)
else:
self.P_rain[:] = P_rain
if (self.DEBUG):
if (self.P_rain.max() > 0):
print ' >> Rain is falling...'
#--------------
# For testing
#--------------
## print 'shape(P) =', shape(self.P)
## print 'shape(T_air) =', shape(self.T_air)
## print 'shape(P_rain) =', shape(self.P_rain)
## print 'T_air =', self.T_air
#########################################
#### Old note, to remember for later.
#--------------------------------------------------
# (2/7/13) We must use "*=" to preserve reference
# if P is a "mutable scalar".
#--------------------------------------------------
# update_P_rain()
#-------------------------------------------------------------------
def update_P_snow(self):
#----------------------------------------------------
# Notes: Rain and snow may fall simultaneously at
# different grid cells in the model domain.
#----------------------------------------------------
if (self.DEBUG):
print 'Calling update_P_snow()...'
#-------------------------------------------------
# P_snow is the precip that falls as snow or ice
# that contributes to the snow depth. This snow
# may melt to contribute to runoff later on.
#-------------------------------------------------
# P_snow is used by snow_base.update_depth.
#-------------------------------------------------
P_snow = self.P * (self.T_air <= 0)
if (np.ndim( self.P_snow ) == 0):
self.P_snow.fill( P_snow ) #### (mutable scalar)
else:
self.P_snow[:] = P_snow
if (self.DEBUG):
if (self.P_snow.max() > 0):
print ' >> Snow is falling...'
# update_P_snow()
#-------------------------------------------------------------------
def update_bulk_richardson_number(self):
if (self.DEBUG):
print 'Calling update_bulk_richardson_number()...'
#---------------------------------------------------------------
# (9/6/14) Found a typo in the Zhang et al. (2000) paper,
# in the definition of Ri. Also see Price and Dunne (1976).
# We should have (Ri > 0) and (T_surf > T_air) when STABLE.
# This also removes problems/singularities in the corrections
# for the stable and unstable cases in the next function.
#---------------------------------------------------------------
# Notes: Other definitions are possible, such as the one given
# by Dingman (2002, p. 599). However, this one is the
# one given by Zhang et al. (2000) and is meant for use
# with the stability criterion also given there.
#---------------------------------------------------------------
#### top = self.g * self.z * (self.T_air - self.T_surf) # BUG.
top = self.g * self.z * (self.T_surf - self.T_air)
bot = (self.uz)**2.0 * (self.T_air + np.float64(273.15))
self.Ri = (top / bot)
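        #---------------------------------------------------------
        # Rough example (assumed values): z = 10 m, uz = 3 m/s,
        # T_air = 10 C, T_surf = 12 C  =>
        # Ri = 9.81 * 10 * 2 / (9 * 283.15) = ~0.077 > 0 (stable).
        #---------------------------------------------------------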
# update_bulk_richardson_number()
#-------------------------------------------------------------------
def update_bulk_aero_conductance(self):
if (self.DEBUG):
print 'Calling update_bulk_aero_conductance()...'
#----------------------------------------------------------------
# Notes: Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat [m/s]
# De = bulk exchange coeff for vapor [m/s]
# h_snow = snow depth [m]
# z0_air = surface roughness length scale [m]
# (includes vegetation not covered by snow)
# z = height that has wind speed uz [m]
# uz = wind speed at height z [m/s]
# kappa = 0.408 = von Karman's constant [unitless]
# RI = Richardson's number (see function)
#----------------------------------------------------------------
h_snow = self.h_snow # (ref from new framework)
#---------------------------------------------------
# Compute bulk exchange coeffs (neutral stability)
# using the logarithm "law of the wall".
#-----------------------------------------------------
# Note that "arg" = the drag coefficient (unitless).
#-----------------------------------------------------
arg = self.kappa / np.log((self.z - h_snow) / self.z0_air)
Dn = self.uz * (arg)**2.0
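        #-----------------------------------------------------------
        # Rough example (assumed values): z = 10 m, h_snow = 0,
        # z0_air = 0.02 m, kappa = ~0.41  =>  arg = ~0.066, so for
        # uz = 3 m/s, Dn = 3 * (0.066)^2 = ~0.013 [m/s].
        #-----------------------------------------------------------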
#-----------------------------------------------
# NB! Dn could be a scalar or a grid, so this
# must be written to handle both cases.
# Note that WHERE can be used on a scalar:
# IDL> a = 1
# IDL> print, size(a)
# IDL> w = where(a ge 1, nw)
# IDL> print, nw
# IDL> a[w] = 2
# IDL> print, a
# IDL> print, size(a)
#-----------------------------------------------
###########################################################
# NB! If T_air and T_surf are both scalars, then next
# few lines won't work because we can't index the
# resulting empty "w" (even if T_air == T_surf).
###########################################################
## w = np.where(self.T_air != self.T_surf)
## nw = np.size(w[0])
## ## nw = np.size(w,0) # (doesn't work if 2 equal scalars)
#----------------------------------------------------------
T_AIR_SCALAR = (np.ndim( self.T_air ) == 0)
T_SURF_SCALAR = (np.ndim( self.T_surf ) == 0)
if (T_AIR_SCALAR and T_SURF_SCALAR):
            #---------------------------------------------------
            # nw counts the non-neutral cases, matching the
            # grid branch below (nw = 0 means neutral).
            #---------------------------------------------------
            if (self.T_air == self.T_surf): nw=0
            else: nw=1
else:
w = np.where(self.T_air != self.T_surf)
nw = np.size(w[0])
if (nw == 0):
#--------------------------------------------
# All pixels are neutral. Set Dh = De = Dn.
#--------------------------------------------
self.Dn = Dn
self.Dh = Dn
self.De = Dn
return
#-------------------------------------
# One or more pixels are not neutral
# so make a correction using RI
#---------------------------------------------
# NB! RI could be a grid when Dn is a
# scalar, and this will change Dn to a grid.
#---------------------------------------------
# Ri = Richardson_Number(z, uz, T_air, T_surf)
#--------------------------------------------
# Before 12/21/07. Has bug if RI is a grid
#--------------------------------------------
# w_stable = where(*T_air gt *T_surf, n_stable)
# if (n_stable ne 0) then begin
# Dn[w_stable] = Dn[w_stable]/(1d + (10d * RI))
# endif
# w_unstable = where(*T_air lt *T_surf, n_unstable)
# if (n_unstable ne 0) then begin
#----------------------------------------------
        # Multiplication and subtraction vs. opposites
# for the stable case. Zhang et al. (2000)
# Hopefully not just a typo.
#----------------------------------------------
# Dn[w_unstable] = Dn[w_unstable]*(1d - (10d * self.Ri))
# endif
#-----------------
# After 12/21/07
#------------------------------------------------------------
# If T_air, T_surf or uz is a grid, then Ri will be a grid.
# This version makes only one call to WHERE, so its faster.
#------------------------------------------------------------
        # Multiplication and subtraction vs. opposites for the
# stable case (Zhang et al., 2000); hopefully not a typo.
# It plots as a smooth curve through Ri=0.
#------------------------------------------------------------
# (9/7/14) Modified so that Dn is saved, but Dh = De.
#------------------------------------------------------------
Dh = Dn.copy() ### (9/7/14. Save Dn also.)
nD = np.size( Dh )
nR = np.size( self.Ri )
if (nR > 1):
#--------------------------
# Case where RI is a grid
#--------------------------
ws = np.where( self.Ri > 0 )
ns = np.size( ws[0] )
wu = np.where( np.invert(self.Ri > 0) )
nu = np.size( wu[0] )
if (nD == 1):
#******************************************
# Convert Dn to a grid here or somewhere
# Should stop with an error message
#******************************************
dum = np.int16(0)
if (ns != 0):
#----------------------------------------------------------
# If (Ri > 0), or (T_surf > T_air), then STABLE. (9/6/14)
#----------------------------------------------------------
Dh[ws] = Dh[ws] / (np.float64(1) + (np.float64(10) * self.Ri[ws]))
if (nu != 0):
Dh[wu] = Dh[wu] * (np.float64(1) - (np.float64(10) * self.Ri[wu]))
else:
#----------------------------
# Case where Ri is a scalar
#--------------------------------
# Works if Dh is grid or scalar
#--------------------------------
if (self.Ri > 0):
Dh = Dh / (np.float64(1) + (np.float64(10) * self.Ri))
else:
Dh = Dh * (np.float64(1) - (np.float64(10) * self.Ri))
#----------------------------------------------------
# NB! We currently assume that these are all equal.
#----------------------------------------------------
self.Dn = Dn
self.Dh = Dh
self.De = Dh ## (assumed equal)
# update_bulk_aero_conductance()
#-------------------------------------------------------------------
def update_sensible_heat_flux(self):
#--------------------------------------------------------
# Notes: All the Q's have units of W/m^2 = J/(m^2 s).
# Dh is returned by Bulk_Exchange_Coeff function
# and is not a pointer.
#--------------------------------------------------------
if (self.DEBUG):
            print 'Calling update_sensible_heat_flux()...'
#---------------------
# Physical constants
#---------------------
# rho_air = 1.225d ;[kg m-3, at sea-level]
# Cp_air = 1005.7 ;[J kg-1 K-1]
#-----------------------------
# Compute sensible heat flux
#-----------------------------
delta_T = (self.T_air - self.T_surf)
self.Qh = (self.rho_air * self.Cp_air) * self.Dh * delta_T
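        #------------------------------------------------------------
        # Rough example (assumed values): rho_air = 1.225, Cp_air =
        # 1005.7, Dh = 0.013, delta_T = +5 K  =>  Qh = ~80 [W m-2].
        #------------------------------------------------------------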
# update_sensible_heat_flux()
#-------------------------------------------------------------------
def update_saturation_vapor_pressure(self, MBAR=False,
SURFACE=False):
if (self.DEBUG):
print 'Calling update_saturation_vapor_pressure()...'
#----------------------------------------------------------------
#Notes: Saturation vapor pressure is a function of temperature.
# T is temperature in Celsius. By default, the method
# of Brutsaert (1975) is used. However, the SATTERLUND
# keyword is set then the method of Satterlund (1979) is
# used. When plotted, they look almost identical. See
# the Compare_em_air_Method routine in Qnet_file.pro.
# Dingman (2002) uses the Brutsaert method.
# Liston (1995, EnBal) uses the Satterlund method.
# By default, the result is returned with units of kPa.
# Set the MBAR keyword for units of millibars.
# 100 kPa = 1 bar = 1000 mbars
# => 1 kPa = 10 mbars
#----------------------------------------------------------------
#NB! Here, 237.3 is correct, and not a misprint of 273.2.
# See footnote on p. 586 in Dingman (Appendix D).
#----------------------------------------------------------------
if (SURFACE):
## if (self.T_surf_type in ['Scalar', 'Grid']):
## return
T = self.T_surf
else:
## if (self.T_air_type in ['Scalar', 'Grid']):
## return
T = self.T_air
if not(self.SATTERLUND):
#------------------------------
# Use Brutsaert (1975) method
#------------------------------
term1 = (np.float64(17.3) * T) / (T + np.float64(237.3))
e_sat = np.float64(0.611) * np.exp(term1) # [kPa]
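            #-------------------------------------------------------
            # Sanity check (illustrative): T = 20 C gives e_sat =
            # 0.611 * exp(346 / 257.3) = ~2.34 kPa (~23.4 mbar),
            # close to the tabulated value of ~2.339 kPa.
            #-------------------------------------------------------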
else:
#-------------------------------
# Use Satterlund (1979) method ############ DOUBLE CHECK THIS (7/26/13)
#-------------------------------
term1 = np.float64(2353) / (T + np.float64(273.15))
e_sat = np.float64(10) ** (np.float64(11.4) - term1) # [Pa]
e_sat = (e_sat / np.float64(1000)) # [kPa]
#-----------------------------------
# Convert units from kPa to mbars?
#-----------------------------------
if (MBAR):
e_sat = (e_sat * np.float64(10)) # [mbar]
if (SURFACE):
self.e_sat_surf = e_sat
else:
self.e_sat_air = e_sat
# update_saturation_vapor_pressure()
#-------------------------------------------------------------------
def update_vapor_pressure(self, MBAR=False,
SURFACE=False):
if (self.DEBUG):
print 'Calling update_vapor_pressure()...'
#---------------------------------------------------
# Notes: T is temperature in Celsius
# RH = relative humidity, in [0,1]
# by definition, it equals (e / e_sat)
# e has units of kPa.
#---------------------------------------------------
if (SURFACE):
## if (self.T_surf_type in ['Scalar', 'Grid']) and \
## (self.RH_type in ['Scalar', 'Grid']):
## return
e_sat = self.e_sat_surf
else:
## if (self.T_air_type in ['Scalar', 'Grid']) and \
## (self.RH_type in ['Scalar', 'Grid']):
## return
e_sat = self.e_sat_air
e = (self.RH * e_sat)
#-----------------------------------
# Convert units from kPa to mbars?
#-----------------------------------
if (MBAR):
e = (e * np.float64(10)) # [mbar]
if (SURFACE):
self.e_surf = e
else:
self.e_air = e
# update_vapor_pressure()
#-------------------------------------------------------------------
def update_dew_point(self):
if (self.DEBUG):
print 'Calling update_dew_point()...'
#-----------------------------------------------------------
# Notes: The dew point is a temperature in degrees C and
# is a function of the vapor pressure, e_air.
# Vapor pressure is a function of air temperature,
# T_air, and relative humidity, RH.
# The formula used here needs e_air in kPa units.
# See Dingman (2002, Appendix D, p. 587).
#-----------------------------------------------------------
e_air_kPa = self.e_air / np.float64(10) # [kPa]
log_vp = np.log( e_air_kPa )
top = log_vp + np.float64(0.4926)
bot = np.float64(0.0708) - (np.float64(0.00421) * log_vp)
self.T_dew = (top / bot) # [degrees C]
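        #-----------------------------------------------------------
        # Sanity check (illustrative): e_air = 10 mbar = 1.0 kPa
        # gives log_vp = 0, so T_dew = 0.4926 / 0.0708 = ~7.0 C,
        # consistent with e_sat(7 C) = ~1.0 kPa from Brutsaert.
        #-----------------------------------------------------------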
# update_dew_point()
#-------------------------------------------------------------------
def update_precipitable_water_content(self):
if (self.DEBUG):
print 'Calling update_precipitable_water_content()...'
#------------------------------------------------------------
# Notes: W_p is precipitable water content in centimeters,
# which depends on air temp and relative humidity.
#------------------------------------------------------------
arg = np.float64( 0.0614 * self.T_dew )
self.W_p = np.float64(1.12) * np.exp( arg ) # [cm]
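        #--------------------------------------------------------
        # Illustrative value (assumed T_dew): T_dew = 10 C gives
        # W_p = 1.12 * exp(0.614) = ~2.1 [cm].
        #--------------------------------------------------------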
# update_precipitable_water_content()
#-------------------------------------------------------------------
def update_latent_heat_flux(self):
if (self.DEBUG):
print 'Calling update_latent_heat_flux()...'
#--------------------------------------------------------
# Notes: Pressure units cancel out because e_air and
# e_surf (in numer) have same units (mbar) as
# p0 (in denom).
#--------------------------------------------------------
# According to Dingman (2002, p. 273), constant should
# be 0.622 instead of 0.662 (Zhang et al., 2000).
#--------------------------------------------------------
const = self.latent_heat_constant
factor = (self.rho_air * self.Lv * self.De)
delta_e = (self.e_air - self.e_surf)
self.Qe = factor * delta_e * (const / self.p0)
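        #--------------------------------------------------------------
        # Rough example (assumed values): rho_air = 1.225, Lv = 2.5e6,
        # De = 0.013, (e_air - e_surf) = +2 mbar, const = 0.662,
        # p0 = 1000 mbar  =>  Qe = ~53 [W m-2].
        #--------------------------------------------------------------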
# update_latent_heat_flux()
#-------------------------------------------------------------------
def update_conduction_heat_flux(self):
if (self.DEBUG):
print 'Calling update_conduction_heat_flux()...'
#-----------------------------------------------------------------
# Notes: The conduction heat flux from snow to soil for computing
# snowmelt energy, Qm, is close to zero.
# However, the conduction heat flux from surface and sub-
# surface for computing Qet is given by Fourier's Law,
# namely Qc = Ks(Tx - Ts)/x.
# All the Q's have units of W/m^2 = J/(m^2 s).
#-----------------------------------------------------------------
pass # (initialized at start)
# update_conduction_heat_flux()
#-------------------------------------------------------------------
def update_advection_heat_flux(self):
if (self.DEBUG):
print 'Calling update_advection_heat_flux()...'
#------------------------------------------------------
# Notes: All the Q's have units of W/m^2 = J/(m^2 s).
#------------------------------------------------------
pass # (initialized at start)
# update_advection_heat_flux()
#-------------------------------------------------------------------
def update_julian_day(self):
if (self.DEBUG):
print 'Calling update_julian_day()...'
#----------------------------------
# Update the *decimal* Julian day
#----------------------------------
self.julian_day += (self.dt / self.secs_per_day) # [days]
#------------------------------------------
# Compute the offset from True Solar Noon
# clock_hour is in 24-hour military time
# but it can have a decimal part.
#------------------------------------------
dec_part = self.julian_day - np.int16(self.julian_day)
clock_hour = dec_part * self.hours_per_day
## print ' Computing solar_noon...'
solar_noon = solar.True_Solar_Noon( self.julian_day,
self.lon_deg,
self.GMT_offset )
## print ' Computing TSN_offset...'
self.TSN_offset = (clock_hour - solar_noon) # [hours]
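        #------------------------------------------------------------
        # Example (assumed values): julian_day = 180.75 gives
        # dec_part = 0.75 and clock_hour = 18; if solar_noon = ~12.1
        # then TSN_offset = ~5.9 [hours].
        #------------------------------------------------------------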
# update_julian_day()
#-------------------------------------------------------------------
def update_net_shortwave_radiation(self):
#---------------------------------------------------------
# Notes: If time is before local sunrise or after local
# sunset then Qn_SW should be zero.
#---------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_shortwave_radiation()...'
#--------------------------------
# Compute Qn_SW for this time
#--------------------------------
Qn_SW = solar.Clear_Sky_Radiation( self.lat_deg,
self.julian_day,
self.W_p,
self.TSN_offset,
self.alpha,
self.beta,
self.albedo,
self.dust_atten )
if (np.ndim( self.Qn_SW ) == 0):
self.Qn_SW.fill( Qn_SW ) #### (mutable scalar)
else:
self.Qn_SW[:] = Qn_SW # [W m-2]
# update_net_shortwave_radiation()
#-------------------------------------------------------------------
def update_em_air(self):
if (self.DEBUG):
print 'Calling update_em_air()...'
#---------------------------------------------------------
# NB! The Brutsaert and Satterlund formulas for air
# emissivity as a function of air temperature are in
# close agreement; see compare_em_air_methods().
# However, we must pay close attention to whether
# equations require units of kPa, Pa, or mbar.
#
# 100 kPa = 1 bar = 1000 mbars
# => 1 kPa = 10 mbars
#---------------------------------------------------------
# NB! Temperatures are assumed to be given with units
# of degrees Celsius and are converted to Kelvin
# wherever necessary by adding C_to_K = 273.15.
#
# RH = relative humidity [unitless]
#---------------------------------------------------------
# NB! I'm not sure about how F is added at end because
# of how the equation is printed in Dingman (2002).
# But it reduces to other formulas as it should.
#---------------------------------------------------------
T_air_K = self.T_air + self.C_to_K
if not(self.SATTERLUND):
#-----------------------------------------------------
# Brutsaert (1975) method for computing emissivity
# of the air, em_air. This formula uses e_air with
# units of kPa. (From Dingman (2002, p. 196).)
# See notes for update_vapor_pressure().
#-----------------------------------------------------
e_air_kPa = self.e_air / np.float64(10) # [kPa]
F = self.canopy_factor
C = self.cloud_factor
term1 = (1.0 - F) * 1.72 * (e_air_kPa / T_air_K) ** self.one_seventh
term2 = (1.0 + (0.22 * C ** 2.0))
self.em_air = (term1 * term2) + F
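            #---------------------------------------------------------
            # Rough example (assumed values): e_air = 10 mbar (1 kPa),
            # T_air = 10 C (283.15 K), F = 0, C = 0  =>
            # em_air = 1.72 * (1.0 / 283.15)^(1/7) = ~0.77.
            #---------------------------------------------------------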
else:
#--------------------------------------------------------
# Satterlund (1979) method for computing the emissivity
# of the air, em_air, that is intended to "correct
# apparent deficiencies in this formulation at air
# temperatures below 0 degrees C" (see G. Liston)
# Liston cites Aase and Idso(1978), Satterlund (1979)
#--------------------------------------------------------
e_air_mbar = self.e_air
eterm = np.exp(-1 * (e_air_mbar)**(T_air_K / 2016) )
self.em_air = 1.08 * (1.0 - eterm)
#--------------------------------------------------------------
# Can't do this yet. em_air is always initialized scalar now
# but may change to grid on assignment. (9/23/14)
#--------------------------------------------------------------
# if (np.ndim( self.em_air ) == 0):
# self.em_air.fill( em_air ) #### (mutable scalar)
# else:
# self.em_air[:] = em_air
# update_em_air()
#-------------------------------------------------------------------
def update_net_longwave_radiation(self):
#----------------------------------------------------------------
# Notes: Net longwave radiation is computed using the
# Stefan-Boltzman law. All four data types
# should be allowed (scalar, time series, grid or
# grid stack).
#
# Qn_LW = (LW_in - LW_out)
# LW_in = em_air * sigma * (T_air + 273.15)^4
# LW_out = em_surf * sigma * (T_surf + 273.15)^4
#
# Temperatures in [deg_C] must be converted to
# [K]. Recall that absolute zero occurs at
# 0 [deg_K] or -273.15 [deg_C].
#
#----------------------------------------------------------------
# First, e_air is computed as:
# e_air = RH * 0.611 * exp[(17.3 * T_air) / (T_air + 237.3)]
# Then, em_air is computed as:
# em_air = (1 - F) * 1.72 * [e_air / (T_air + 273.15)]^(1/7) *
# (1 + 0.22 * C^2) + F
#----------------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_longwave_radiation()...'
#--------------------------------
# Compute Qn_LW for this time
#--------------------------------
T_air_K = self.T_air + self.C_to_K
T_surf_K = self.T_surf + self.C_to_K
LW_in = self.em_air * self.sigma * (T_air_K)** 4.0
LW_out = self.em_surf * self.sigma * (T_surf_K)** 4.0
LW_out = LW_out + ((1.0 - self.em_surf) * LW_in)
self.Qn_LW = (LW_in - LW_out) # [W m-2]
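        #------------------------------------------------------------
        # Rough example (assumed values): em_air = 0.77, em_surf =
        # 0.98, T_air = 10 C, T_surf = 5 C  =>  LW_in = ~281,
        # LW_out = ~338, so Qn_LW = ~-57 [W m-2] (net loss).
        #------------------------------------------------------------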
#--------------------------------------------------------------
# Can't do this yet. Qn_LW is always initialized grid now
# but will often be created above as a scalar. (9/23/14)
#--------------------------------------------------------------
# if (np.ndim( self.Qn_LW ) == 0):
# self.Qn_LW.fill( Qn_LW ) #### (mutable scalar)
# else:
# self.Qn_LW[:] = Qn_LW # [W m-2]
# update_net_longwave_radiation()
#-------------------------------------------------------------------
def update_net_total_radiation(self):
#-----------------------------------------------
# Notes: Added this on 9/11/14. Not used yet.
#------------------------------------------------------------
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
#------------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_total_radiation()...'
Qn_tot = self.Qn_SW + self.Qn_LW # [W m-2]
if (np.ndim( self.Qn_tot ) == 0):
self.Qn_tot.fill( Qn_tot ) #### (mutable scalar)
else:
self.Qn_tot[:] = Qn_tot # [W m-2]
# update_net_total_radiation()
#-------------------------------------------------------------------
def update_net_energy_flux(self):
if (self.DEBUG):
print 'Calling update_net_energy_flux()...'
#------------------------------------------------------
# Notes: Q_sum is used by "snow_energy_balance.py".
#------------------------------------------------------
# Qm = energy used to melt snowpack (if > 0)
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
# Qh = sensible heat flux from turbulent convection
# between snow surface and air
# Qe = latent heat flux from evaporation, sublimation,
# and condensation
# Qa = energy advected by moving water (i.e. rainfall)
# (ARHYTHM assumes this to be negligible; Qa=0.)
# Qc = energy flux via conduction from snow to soil
# (ARHYTHM assumes this to be negligible; Qc=0.)
# Ecc = cold content of snowpack = amount of energy
# needed before snow can begin to melt [J m-2]
# All Q's here have units of [W m-2].
# Are they all treated as positive quantities ?
# rho_air = density of air [kg m-3]
# rho_snow = density of snow [kg m-3]
# Cp_air = specific heat of air [J kg-1 K-1]
# Cp_snow = heat capacity of snow [J kg-1 K-1]
# = ???????? = specific heat of snow
# Kh = eddy diffusivity for heat [m2 s-1]
# Ke = eddy diffusivity for water vapor [m2 s-1]
# Lv = latent heat of vaporization [J kg-1]
# Lf = latent heat of fusion [J kg-1]
# ------------------------------------------------------
# Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat
# De = bulk exchange coeff for vapor
# ------------------------------------------------------
# T_air = air temperature [deg_C]
# T_surf = surface temperature [deg_C]
# T_snow = average snow temperature [deg_C]
# RH = relative humidity [unitless] (in [0,1])
# e_air = air vapor pressure at height z [mbar]
# e_surf = surface vapor pressure [mbar]
# ------------------------------------------------------
# h_snow = snow depth [m]
# z = height where wind speed is uz [m]
# uz = wind speed at height z [m/s]
# p0 = atmospheric pressure [mbar]
# T0 = snow temperature when isothermal [deg_C]
# (This is usually 0.)
# z0_air = surface roughness length scale [m]
# (includes vegetation not covered by snow)
# (Values from page 1033: 0.0013, 0.02 [m])
# kappa = von Karman's constant [unitless] = 0.41
# dt = snowmelt timestep [seconds]
#----------------------------------------------------------------
Q_sum = self.Qn_SW + self.Qn_LW + self.Qh + \
self.Qe + self.Qa + self.Qc # [W m-2]
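        #--------------------------------------------------------------
        # Rough example (assumed values): Qn_SW = 400, Qn_LW = -60,
        # Qh = 80, Qe = -50, Qa = Qc = 0  =>  Q_sum = 370 [W m-2].
        #--------------------------------------------------------------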
if (np.ndim( self.Q_sum) == 0):
self.Q_sum.fill( Q_sum ) #### (mutable scalar)
else:
self.Q_sum[:] = Q_sum # [W m-2]
# update_net_energy_flux()
#-------------------------------------------------------------------
def open_input_files(self):
if (self.DEBUG):
print 'Calling open_input_files()...'
self.rho_H2O_file = self.in_directory + self.rho_H2O_file
self.P_file = self.in_directory + self.P_file
self.T_air_file = self.in_directory + self.T_air_file
self.T_surf_file = self.in_directory + self.T_surf_file
self.RH_file = self.in_directory + self.RH_file
self.p0_file = self.in_directory + self.p0_file
self.uz_file = self.in_directory + self.uz_file
self.z_file = self.in_directory + self.z_file
self.z0_air_file = self.in_directory + self.z0_air_file
self.albedo_file = self.in_directory + self.albedo_file
self.em_surf_file = self.in_directory + self.em_surf_file
self.dust_atten_file = self.in_directory + self.dust_atten_file
self.cloud_factor_file = self.in_directory + self.cloud_factor_file
self.canopy_factor_file = self.in_directory + self.canopy_factor_file
self.rho_H2O_unit = model_input.open_file(self.rho_H2O_type, self.rho_H2O_file)
self.P_unit = model_input.open_file(self.P_type, self.P_file)
self.T_air_unit = model_input.open_file(self.T_air_type, self.T_air_file)
self.T_surf_unit = model_input.open_file(self.T_surf_type, self.T_surf_file)
self.RH_unit = model_input.open_file(self.RH_type, self.RH_file)
self.p0_unit = model_input.open_file(self.p0_type, self.p0_file)
self.uz_unit = model_input.open_file(self.uz_type, self.uz_file)
self.z_unit = model_input.open_file(self.z_type, self.z_file)
self.z0_air_unit = model_input.open_file(self.z0_air_type, self.z0_air_file)
#-----------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#-----------------------------------------------
self.albedo_unit = model_input.open_file(self.albedo_type,
self.albedo_file)
self.em_surf_unit = model_input.open_file(self.em_surf_type,
self.em_surf_file)
self.dust_atten_unit = model_input.open_file(self.dust_atten_type,
self.dust_atten_file)
self.cloud_factor_unit = model_input.open_file(self.cloud_factor_type,
self.cloud_factor_file)
self.canopy_factor_unit = model_input.open_file(self.canopy_factor_type,
self.canopy_factor_file)
#----------------------------------------------------------------------------
# Note: GMT_offset plus slope and aspect grids will be read separately.
#----------------------------------------------------------------------------
## self.Qn_SW_unit = model_input.open_file(self.Qn_SW_type, self.Qn_SW_file)
## self.Qn_LW_unit = model_input.open_file(self.Qn_LW_type, self.Qn_LW_file)
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
if (self.DEBUG):
print 'Calling read_input_files()...'
rti = self.rti
#--------------------------------------------------------
# All grids are assumed to have a data type of Float32.
#--------------------------------------------------------
# NB! read_next() returns None if TYPE arg is "Scalar".
#--------------------------------------------------------
rho_H2O = model_input.read_next(self.rho_H2O_unit, self.rho_H2O_type, rti)
if (rho_H2O is not None): self.rho_H2O = rho_H2O
P = model_input.read_next(self.P_unit, self.P_type, rti,
factor=self.mmph_to_mps)
if (P is not None):
## print 'MET: (time,P) =', self.time, P
## if (self.P_type.lower() != 'scalar'):
if (np.ndim( self.P ) == 0):
self.P.fill( P ) #### (2/7/13, mutable scalar)
else:
self.P = P
if (self.DEBUG or (self.time_index == 0)):
print 'In read_input_files():'
print ' min(P) =', P.min() * self.mps_to_mmph, ' [mmph]'
print ' max(P) =', P.max() * self.mps_to_mmph, ' [mmph]'
print ' '
else:
#-----------------------------------------------
# Either self.P_type is "Scalar" or we've read
# all of the data in the rain_rates file.
#-----------------------------------------------
if (self.P_type.lower() != 'scalar'):
#------------------------------------
# Precip is unique in this respect.
#--------------------------------------------------
# 2/7/13. Note that we don't change P from grid
# to scalar since that could cause trouble for
# other comps that use P, so we just zero it out.
#-------------------------------------------------- #
self.P.fill( 0 )
if (self.DEBUG):
print 'Reached end of file:', self.P_file
print ' P set to 0 by read_input_files().'
elif (self.time_sec >= self.dt):
self.P.fill( 0 )
if (self.DEBUG):
print 'Reached end of scalar rainfall duration.'
print ' P set to 0 by read_input_files().'
## print 'time_sec =', self.time_sec
## print 'met dt =', self.dt
## print '######### In met_base.read_input_files() #######'
## print 'self.P_type =', self.P_type
## print 'self.P =', self.P
###############################################################
# If any of these are scalars (read from a time series file)
# then we'll need to use "fill()" method to prevent breaking
# the reference to the "mutable scalar". (2/7/13)
###############################################################
T_air = model_input.read_next(self.T_air_unit, self.T_air_type, rti)
if (T_air is not None): self.T_air = T_air
T_surf = model_input.read_next(self.T_surf_unit, self.T_surf_type, rti)
if (T_surf is not None): self.T_surf = T_surf
RH = model_input.read_next(self.RH_unit, self.RH_type, rti)
if (RH is not None): self.RH = RH
p0 = model_input.read_next(self.p0_unit, self.p0_type, rti)
if (p0 is not None): self.p0 = p0
uz = model_input.read_next(self.uz_unit, self.uz_type, rti)
if (uz is not None): self.uz = uz
z = model_input.read_next(self.z_unit, self.z_type, rti)
if (z is not None): self.z = z
z0_air = model_input.read_next(self.z0_air_unit, self.z0_air_type, rti)
if (z0_air is not None): self.z0_air = z0_air
#----------------------------------------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#----------------------------------------------------------------------------
# Note: We could later write a version of read_next() that takes "self"
# and "var_name" as args and that uses "exec()".
#----------------------------------------------------------------------------
albedo = model_input.read_next(self.albedo_unit, self.albedo_type, rti)
if (albedo is not None): self.albedo = albedo
em_surf = model_input.read_next(self.em_surf_unit, self.em_surf_type, rti)
if (em_surf is not None): self.em_surf = em_surf
dust_atten = model_input.read_next(self.dust_atten_unit, self.dust_atten_type, rti)
if (dust_atten is not None): self.dust_atten = dust_atten
cloud_factor = model_input.read_next(self.cloud_factor_unit, self.cloud_factor_type, rti)
if (cloud_factor is not None): self.cloud_factor = cloud_factor
canopy_factor = model_input.read_next(self.canopy_factor_unit, self.canopy_factor_type, rti)
if (canopy_factor is not None): self.canopy_factor = canopy_factor
#-------------------------------------------------------------
# Compute Qsw_prefactor from cloud_factor and canopy factor.
#-------------------------------------------------------------
## self.Qsw_prefactor =
#-------------------------------------------------------------
# These are currently treated as input data, but are usually
# generated by functions in Qnet_file.py. Later on, we'll
# provide the option to compute them "on the fly" with new
# functions called "update_net_shortwave_radiation()" and
# "update_net_longwave_radiation()", called from update().
#-------------------------------------------------------------
## Qn_SW = model_input.read_next(self.Qn_SW_unit, self.Qn_SW_type, rti)
## if (Qn_SW is not None): self.Qn_SW = Qn_SW
##
## Qn_LW = model_input.read_next(self.Qn_LW_unit, self.Qn_LW_type, rti)
## if (Qn_LW is not None): self.Qn_LW = Qn_LW
# read_input_files()
#-------------------------------------------------------------------
def close_input_files(self):
if (self.DEBUG):
print 'Calling close_input_files()...'
if (self.P_type != 'Scalar'): self.P_unit.close()
if (self.T_air_type != 'Scalar'): self.T_air_unit.close()
if (self.T_surf_type != 'Scalar'): self.T_surf_unit.close()
if (self.RH_type != 'Scalar'): self.RH_unit.close()
if (self.p0_type != 'Scalar'): self.p0_unit.close()
if (self.uz_type != 'Scalar'): self.uz_unit.close()
if (self.z_type != 'Scalar'): self.z_unit.close()
if (self.z0_air_type != 'Scalar'): self.z0_air_unit.close()
#---------------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#---------------------------------------------------
if (self.albedo_type != 'Scalar'): self.albedo_unit.close()
if (self.em_surf_type != 'Scalar'): self.em_surf_unit.close()
if (self.dust_atten_type != 'Scalar'): self.dust_atten_unit.close()
if (self.cloud_factor_type != 'Scalar'): self.cloud_factor_unit.close()
if (self.canopy_factor_type != 'Scalar'): self.canopy_factor_unit.close()
## if (self.Qn_SW_type != 'Scalar'): self.Qn_SW_unit.close()
## if (self.Qn_LW_type != 'Scalar'): self.Qn_LW_unit.close()
## if (self.P_file != ''): self.P_unit.close()
## if (self.T_air_file != ''): self.T_air_unit.close()
## if (self.T_surf_file != ''): self.T_surf_unit.close()
## if (self.RH_file != ''): self.RH_unit.close()
## if (self.p0_file != ''): self.p0_unit.close()
## if (self.uz_file != ''): self.uz_unit.close()
## if (self.z_file != ''): self.z_unit.close()
## if (self.z0_air_file != ''): self.z0_air_unit.close()
## #--------------------------------------------------------
## if (self.Qn_SW_file != ''): self.Qn_SW_unit.close()
## if (self.Qn_LW_file != ''): self.Qn_LW_unit.close()
# close_input_files()
#-------------------------------------------------------------------
def update_outfile_names(self):
if (self.DEBUG):
print 'Calling update_outfile_names()...'
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.ea_gs_file = (self.out_directory + self.ea_gs_file )
self.es_gs_file = (self.out_directory + self.es_gs_file )
self.Qsw_gs_file = (self.out_directory + self.Qsw_gs_file )
self.Qlw_gs_file = (self.out_directory + self.Qlw_gs_file )
self.ema_gs_file = (self.out_directory + self.ema_gs_file )
#------------------------------------------------------------
self.ea_ts_file = (self.out_directory + self.ea_ts_file )
self.es_ts_file = (self.out_directory + self.es_ts_file )
self.Qsw_ts_file = (self.out_directory + self.Qsw_ts_file )
self.Qlw_ts_file = (self.out_directory + self.Qlw_ts_file )
self.ema_ts_file = (self.out_directory + self.ema_ts_file )
## self.ea_gs_file = (self.case_prefix + '_2D-ea.rts')
## self.es_gs_file = (self.case_prefix + '_2D-es.rts')
## #-----------------------------------------------------
## self.ea_ts_file = (self.case_prefix + '_0D-ea.txt')
## self.es_ts_file = (self.case_prefix + '_0D-es.txt')
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
if (self.DEBUG):
print 'Calling open_output_files()...'
model_output.check_netcdf()
self.update_outfile_names()
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
if (self.SAVE_EA_GRIDS):
model_output.open_new_gs_file( self, self.ea_gs_file, self.rti,
## var_name='e_air',
var_name='ea',
long_name='vapor_pressure_in_air',
units_name='mbar')
if (self.SAVE_ES_GRIDS):
model_output.open_new_gs_file( self, self.es_gs_file, self.rti,
## var_name='e_surf',
var_name='es',
long_name='vapor_pressure_at_surface',
units_name='mbar')
if (self.SAVE_QSW_GRIDS):
model_output.open_new_gs_file( self, self.Qsw_gs_file, self.rti,
var_name='Qsw',
long_name='net_shortwave_radiation',
units_name='W/m^2')
if (self.SAVE_QLW_GRIDS):
model_output.open_new_gs_file( self, self.Qlw_gs_file, self.rti,
var_name='Qlw',
long_name='net_longwave_radiation',
units_name='W/m^2')
if (self.SAVE_EMA_GRIDS):
model_output.open_new_gs_file( self, self.ema_gs_file, self.rti,
var_name='ema',
long_name='air_emissivity',
units_name='none')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_EA_PIXELS):
model_output.open_new_ts_file( self, self.ea_ts_file, IDs,
## var_name='e_air',
var_name='ea',
long_name='vapor_pressure_in_air',
units_name='mbar')
if (self.SAVE_ES_PIXELS):
model_output.open_new_ts_file( self, self.es_ts_file, IDs,
## var_name='e_surf',
var_name='es',
long_name='vapor_pressure_at_surface',
units_name='mbar')
if (self.SAVE_QSW_PIXELS):
model_output.open_new_ts_file( self, self.Qsw_ts_file, IDs,
var_name='Qsw',
long_name='net_shortwave_radiation',
units_name='W/m^2')
if (self.SAVE_QLW_PIXELS):
model_output.open_new_ts_file( self, self.Qlw_ts_file, IDs,
var_name='Qlw',
long_name='net_longwave_radiation',
units_name='W/m^2')
if (self.SAVE_EMA_PIXELS):
model_output.open_new_ts_file( self, self.ema_ts_file, IDs,
var_name='ema',
long_name='air_emissivity',
units_name='none')
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
if (self.DEBUG):
print 'Calling write_output_files()...'
#-----------------------------------------
# Allows time to be passed from a caller
#-----------------------------------------
if (time_seconds is None):
time_seconds = self.time_sec
model_time = int(time_seconds)
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
if (model_time % int(self.save_grid_dt) == 0):
self.save_grids()
if (model_time % int(self.save_pixels_dt) == 0):
self.save_pixel_values()
# write_output_files()
#-------------------------------------------------------------------
def close_output_files(self):
if (self.SAVE_EA_GRIDS): model_output.close_gs_file( self, 'ea')
if (self.SAVE_ES_GRIDS): model_output.close_gs_file( self, 'es')
if (self.SAVE_QSW_GRIDS): model_output.close_gs_file( self, 'Qsw')
if (self.SAVE_QLW_GRIDS): model_output.close_gs_file( self, 'Qlw')
if (self.SAVE_EMA_GRIDS): model_output.close_gs_file( self, 'ema')
#-------------------------------------------------------------------
if (self.SAVE_EA_PIXELS): model_output.close_ts_file( self, 'ea')
if (self.SAVE_ES_PIXELS): model_output.close_ts_file( self, 'es')
if (self.SAVE_QSW_PIXELS): model_output.close_ts_file( self, 'Qsw')
if (self.SAVE_QLW_PIXELS): model_output.close_ts_file( self, 'Qlw')
if (self.SAVE_EMA_PIXELS): model_output.close_ts_file( self, 'ema')
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
if (self.SAVE_EA_GRIDS):
model_output.add_grid( self, self.e_air, 'ea', self.time_min )
if (self.SAVE_ES_GRIDS):
model_output.add_grid( self, self.e_surf, 'es', self.time_min )
if (self.SAVE_QSW_GRIDS):
model_output.add_grid( self, self.Qn_SW, 'Qsw', self.time_min )
if (self.SAVE_QLW_GRIDS):
model_output.add_grid( self, self.Qn_LW, 'Qlw', self.time_min )
if (self.SAVE_EMA_GRIDS):
model_output.add_grid( self, self.em_air, 'ema', self.time_min )
# save_grids()
#-------------------------------------------------------------------
def save_pixel_values(self):
IDs = self.outlet_IDs
time = self.time_min ######
if (self.SAVE_EA_PIXELS):
model_output.add_values_at_IDs( self, time, self.e_air, 'ea', IDs )
if (self.SAVE_ES_PIXELS):
model_output.add_values_at_IDs( self, time, self.e_surf, 'es', IDs )
if (self.SAVE_QSW_PIXELS):
model_output.add_values_at_IDs( self, time, self.Qn_SW, 'Qsw', IDs )
if (self.SAVE_QLW_PIXELS):
model_output.add_values_at_IDs( self, time, self.Qn_LW, 'Qlw', IDs )
if (self.SAVE_EMA_PIXELS):
model_output.add_values_at_IDs( self, time, self.em_air, 'ema', IDs )
# save_pixel_values()
#-------------------------------------------------------------------
#---------------------------------------------------------------------------------
def compare_em_air_methods():
#--------------------------------------------------------------
# Notes: There are two different methods that are commonly
# used to compute the vapor pressure of air, e_air,
# and then the emissivity of air, em_air, for use in
# longwave radiation calculations. This routine
# compares them graphically.
#
# NB! This hasn't been tested since conversion from IDL.
#-------------------------------------------------------------
import matplotlib.pyplot
T_air = np.arange(80, dtype='Float32') - np.float64(40) #[Celsius] (-40 to 40)
RH = np.float64(1.0)
C2K = np.float64(273.15)
#--------------------------
# Brutsaert (1975) method
#--------------------------
term1 = (np.float64(17.3) * T_air) / (T_air + np.float64(237.3)) ######### DOUBLE CHECK THIS (7/26/13)
e_air1 = RH * np.float64(0.611) * np.exp( term1 ) # [kPa]
em_air1 = np.float64(1.72) * (e_air1 / (T_air + C2K)) ** (np.float64(1) / 7)
#---------------------------
# Satterlund (1979) method
#----------------------------
# NB! e_air has units of Pa
#----------------------------
term2 = np.float64(2353) / (T_air + C2K)
e_air2 = RH * np.float64(10) ** (np.float64(11.40) - term2) # [Pa]
eterm = np.exp(-np.float64(1) * (e_air2 / np.float64(100)) ** ((T_air + C2K) / np.float64(2016)))
em_air2 = np.float64(1.08) * (np.float64(1) - eterm)
#----------------------------
# Plot the two e_air curves
#--------------------------------
# These two agree quite closely
#--------------------------------
    matplotlib.pyplot.figure(figsize=(8, 6), dpi=80)
    matplotlib.pyplot.plot(T_air, e_air1)
    matplotlib.pyplot.show()
## oplot(T_air, (e_air2 / np.float64(1000)), psym=-3) # [Pa -> kPa]
#-----------------------------
# Plot the two em_air curves
#--------------------------------------------------
# These two don't agree very well for some reason
#--------------------------------------------------
    matplotlib.pyplot.figure(figsize=(8, 6), dpi=80)
    matplotlib.pyplot.plot(T_air, em_air1)
    matplotlib.pyplot.show()
## oplot(T_air, em_air2, psym=-3)
# compare_em_air_Methods
#---------------------------------------------------------------------------------
| mit | 5,983,719,761,350,209,000 | 48.053394 | 117 | 0.417861 | false |
pablo2000/picochess | dgtlib.py | 1 | 2752 | # Copyright (C) 2013-2016 Jean-Francois Romang ([email protected])
# Shivkumar Shivaji ()
# Jürgen Précour ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from utilities import *
import time
class DgtLib(object):
"""This class simulates DGT's SO-lib File with similar API."""
def __init__(self, dgtserial):
super(DgtLib, self).__init__()
self.dgtserial = dgtserial
def write(self, command):
if command[0].value == DgtCmd.DGT_CLOCK_MESSAGE.value:
while self.dgtserial.clock_lock:
time.sleep(0.1)
self.dgtserial.write_board_command(command)
def set_text_3k(self, text, beep, ld, rd):
self.write([DgtCmd.DGT_CLOCK_MESSAGE, 0x0c, DgtClk.DGT_CMD_CLOCK_START_MESSAGE, DgtClk.DGT_CMD_CLOCK_ASCII,
text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], beep,
DgtClk.DGT_CMD_CLOCK_END_MESSAGE])
return 0
def set_text_xl(self, text, beep, ld, rd):
self.write([DgtCmd.DGT_CLOCK_MESSAGE, 0x0b, DgtClk.DGT_CMD_CLOCK_START_MESSAGE, DgtClk.DGT_CMD_CLOCK_DISPLAY,
text[2], text[1], text[0], text[5], text[4], text[3], 0x00, beep,
DgtClk.DGT_CMD_CLOCK_END_MESSAGE])
return 0
def set_and_run(self, lr, lh, lm, ls, rr, rh, rm, rs):
side = 0x04
if lr == 1 and rr == 0:
side = 0x01
if lr == 0 and rr == 1:
side = 0x02
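        # Note: side is 0x01 when only the left run-flag (lr) is set,
        # 0x02 when only the right run-flag (rr) is set, and 0x04
        # otherwise (both or neither) -- presumably telling the clock
        # which side should be running.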
self.write([DgtCmd.DGT_CLOCK_MESSAGE, 0x0a, DgtClk.DGT_CMD_CLOCK_START_MESSAGE, DgtClk.DGT_CMD_CLOCK_SETNRUN,
lh, lm, ls, rh, rm, rs,
side, DgtClk.DGT_CMD_CLOCK_END_MESSAGE])
self.write([DgtCmd.DGT_CLOCK_MESSAGE, 0x03, DgtClk.DGT_CMD_CLOCK_START_MESSAGE, DgtClk.DGT_CMD_CLOCK_END,
DgtClk.DGT_CMD_CLOCK_END_MESSAGE])
return 0
def end_text(self):
self.write([DgtCmd.DGT_CLOCK_MESSAGE, 0x03, DgtClk.DGT_CMD_CLOCK_START_MESSAGE, DgtClk.DGT_CMD_CLOCK_END,
DgtClk.DGT_CMD_CLOCK_END_MESSAGE])
return 0
| gpl-3.0 | -1,395,131,923,043,318,800 | 41.96875 | 117 | 0.622182 | false |
cytex124/celsius-cloud-backend | src/celsius/tools.py | 1 | 3362 | from rest_framework.pagination import PageNumberPagination
from rest_framework import serializers
from rest_framework import permissions
from collections import OrderedDict
typing_arr = {
'AutoField': 'int',
'CharField': 'string',
'DateTimeField': 'datetime',
'DateField': 'date',
'BooleanField': 'bool'
}
class FiftyItemsSetPagination(PageNumberPagination):
page_size = 50
def get_paginated_response(self, data):
response = super(
FiftyItemsSetPagination,
self
).get_paginated_response(data)
response.data['headers'] = self.get_headers(data)
return response
def get_headers(self, data):
headers = []
model = data.serializer.child.Meta.model
fields = data.serializer.child.Meta.fields
for fieldname in fields:
field = model._meta.get_field(fieldname)
headers.append({
'type': typing_arr[field.get_internal_type()],
'display': field.verbose_name,
'key': field.name
})
return headers
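# Illustrative note (added for clarity, not part of the original module): for a
# model with a CharField called "name", get_headers() above would typically
# produce an entry shaped like
#   {'type': 'string', 'display': 'name', 'key': 'name'}
# where 'type' is looked up in the typing_arr mapping at the top of this file.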
class NextPrevSerializer(serializers.ModelSerializer):
next_id = serializers.SerializerMethodField()
def get_next_id(self, obj):
try:
res = self.context['view'].queryset.filter(id__gt=obj.id)
if len(res):
return res[0].id
except:
pass
return None
prev_id = serializers.SerializerMethodField()
def get_prev_id(self, obj):
try:
res = self.context['view'].queryset.filter(id__lt=obj.id)
if len(res):
return res[len(res)-1].id
except:
pass
return None
class SepView(object):
def get(self, request, *args, **kwargs):
result = super(SepView, self).get(request, *args, **kwargs)
sepdata = OrderedDict()
sepdata['result'] = OrderedDict()
for key in result.data:
if key in ['next_id', 'prev_id']:
sepdata[key] = result.data[key]
else:
sepdata['result'][key] = result.data[key]
result.data = sepdata
return result
class IsOwner(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
Assumes the model instance has an `owner` attribute.
"""
def has_object_permission(self, request, view, obj):
if request.user.is_staff or request.user.is_superuser:
return True
return obj.owner == request.user
_registered_models_for_permission_handling = []
def register_for_permission_handling(model):
_registered_models_for_permission_handling.append(model)
def get_models_for_permission_handling():
return _registered_models_for_permission_handling
_register_models_for_url_list_handling = []
def register_for_urllist_handling(name, model):
_register_models_for_url_list_handling.append((name, model))
def get_models_for_urllist_handling():
return _register_models_for_url_list_handling
_register_models_for_form_handling = []
def register_for_form_handling(name, createview, view, allview):
_register_models_for_form_handling.append(
(name, createview, view, allview)
)
def get_models_for_form_handling():
return _register_models_for_form_handling
| mit | 795,435,036,950,483,000 | 26.112903 | 73 | 0.6279 | false |
tsolar/bikepolo-tournament | base/models.py | 1 | 3221 | # coding: utf-8
from django.contrib.auth.models import User
from django.db import models
from django.forms import ModelForm
from django.http import HttpResponse
NOMBRE_MAX_LENGTH = 100
class Jugador(models.Model):
user = models.OneToOneField(User, null=True, blank=True)
nombre = models.CharField(max_length = NOMBRE_MAX_LENGTH)
#equipos = models.ManyToManyField(Equipo, related_name='equipos')
def __unicode__(self):
if self.user:
return self.user.username
elif self.nombre == '':
return "<Jugador #%s>" % self.id
return self.nombre
# when a user is created, a Jugador (player) linked to that user is created as well
def user_post_save(sender, instance, **kwargs):
jugador, new = Jugador.objects.get_or_create(user=instance)
models.signals.post_save.connect(user_post_save, sender=User)
class Equipo(models.Model):
nombre = models.CharField(max_length = NOMBRE_MAX_LENGTH)
jugadores = models.ManyToManyField(Jugador, through='MembresiaEquipo',
related_name='equipos')
def __unicode__(self):
return self.nombre
def agregar_jugador(self, jugador, aprobado=False):
membresia, created = MembresiaEquipo.objects.get_or_create(
jugador=jugador, equipo=self)
membresia.aprobado = aprobado
membresia.save()
return membresia, created
"""Obtiene los jugadores con aprobación pendiente"""
def jugadores_pendientes(self):
membresias_equipo = self.membresias.all()
jugadores_pendientes = []
for membresia in membresias_equipo:
if membresia.aprobado is not True and membresia.es_admin is False:
jugadores_pendientes.append(membresia.jugador)
return jugadores_pendientes
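# Illustrative usage sketch (added for clarity, not part of the original
# models; the object values are hypothetical):
#
#   equipo = Equipo.objects.create(nombre='Ciclopatas')
#   jugador = Jugador.objects.get(user__username='alice')
#   membresia, created = equipo.agregar_jugador(jugador, aprobado=True)
#
# agregar_jugador() wraps get_or_create on MembresiaEquipo, so calling it twice
# for the same player simply updates the existing membership.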
class EquipoForm(ModelForm):
class Meta:
model = Equipo
fields = ['nombre', 'jugadores']
class MembresiaEquipo(models.Model):
jugador = models.ForeignKey(Jugador, related_name='membresia_equipos')
equipo = models.ForeignKey(Equipo, related_name='membresias')
aprobado = models.BooleanField(default=False)
es_admin = models.BooleanField(default=False)
es_capitan = models.BooleanField(default=False)
# date_joined = models.DateField()
# invite_reason = models.CharField(max_length=64)
class Meta:
unique_together = ("jugador", "equipo")
def __unicode__(self):
return 'El jugador "%s" juega en "%s"' % (self.jugador, self.equipo)
def save(self, *args, **kwargs):
membresia = MembresiaEquipo.objects.filter(equipo=self.equipo)
if not membresia:
self.aprobado = True
self.es_admin = True
self.es_capitan = True
instance = super(MembresiaEquipo, self).save(*args, **kwargs)
return instance
class Partido(models.Model):
equipo1 = models.ForeignKey(Equipo, related_name='equipo1')
equipo2 = models.ForeignKey(Equipo, related_name='equipo2')
goles1 = models.IntegerField(default=0)
goles2 = models.IntegerField(default=0)
fecha = models.DateTimeField()
def __unicode__(self):
return '%s vs %s (%s)' % (self.equipo1, self.equipo2, self.fecha)
| gpl-3.0 | 3,063,254,844,025,939,500 | 32.894737 | 78 | 0.66646 | false |
minio/minio-py | tests/unit/put_object_test.py | 1 | 1462 | # -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import raises
from minio import Minio
class PutObjectTest(TestCase):
@raises(TypeError)
def test_object_is_string(self):
client = Minio('localhost:9000')
client.put_object('hello', 1234, 1, iter([1, 2, 3]))
@raises(ValueError)
def test_object_is_not_empty_string(self):
client = Minio('localhost:9000')
client.put_object('hello', ' \t \n ', 1, iter([1, 2, 3]))
@raises(TypeError)
def test_length_is_string(self):
client = Minio('localhost:9000')
client.put_object('hello', 1234, '1', iter([1, 2, 3]))
@raises(ValueError)
def test_length_is_not_empty_string(self):
client = Minio('localhost:9000')
client.put_object('hello', ' \t \n ', -1, iter([1, 2, 3]))
| apache-2.0 | -6,651,362,612,586,482,000 | 33 | 74 | 0.673735 | false |
boknilev/dsl-char-cnn | src/cnn_multifilter_cv.py | 1 | 6190 | '''CNN code for DSL 2016 task 2, with cross validation
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Embedding, merge
from keras.layers import Convolution1D, MaxPooling1D
#from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import np_utils
#from keras.regularizers import l1, l2, l1l2, activity_l1, activity_l2, activity_l1l2
#from keras.layers.normalization import BatchNormalization
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.cross_validation import StratifiedKFold
from data import load_file, load_labels, alphabet, full_train_file, labels_file
# limit tensorflow memory usage
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
# set parameters:
print('Hyperparameters:')
alphabet_size = len(alphabet) + 2 # add 2, one padding and unknown chars
print('Alphabet size:', alphabet_size)
maxlen = 400
print('Max text len:', maxlen)
batch_size = 16
print('Batch size:', batch_size)
embedding_dims = 50
print('Embedding dim:', embedding_dims)
#nb_filters = [50,50,100,100,100,100,100]
nb_filters = [80,80,80]
print('Number of filters:', nb_filters)
#filter_lengths = [1,2,3,4,5,6,7]
filter_lengths = [4,5,6]
print('Filter lengths:', filter_lengths)
hidden_dims = 250
print('Hidden dims:', hidden_dims)
nb_epoch = 20
embedding_droupout = 0.2
print('Embedding dropout:', embedding_droupout)
fc_dropout = 0.5
print('Fully-connected dropout:', fc_dropout)
# cross validation
n_folds = 10
print('Loading data...')
X_train, y_train, num_classes = load_file(full_train_file, alphabet)
print(len(X_train), 'train sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
print('X_train shape:', X_train.shape)
y_train = np.array(y_train)
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, num_classes)
def make_model(maxlen, alphabet_size, embedding_dims, embedding_droupout,
nb_filters, filter_lengths, hidden_dims, fc_dropout,
num_classes):
print('Build model...')
main_input = Input(shape=(maxlen,))
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
embedding_layer = Embedding(alphabet_size,
embedding_dims,
input_length=maxlen,
dropout=embedding_droupout)
embedded = embedding_layer(main_input)
# we add a Convolution1D for each filter length, which will learn nb_filters[i]
# word group filters of size filter_lengths[i]:
convs = []
for i in xrange(len(nb_filters)):
conv_layer = Convolution1D(nb_filter=nb_filters[i],
filter_length=filter_lengths[i],
border_mode='valid',
activation='relu',
subsample_length=1)
conv_out = conv_layer(embedded)
# we use max pooling:
conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
# We flatten the output of the conv layer,
# so that we can concat all conv outpus and add a vanilla dense layer:
conv_out = Flatten()(conv_out)
convs.append(conv_out)
# concat all conv outputs
x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
#concat = BatchNormalization()(concat)
# We add a vanilla hidden layer:
x = Dense(hidden_dims)(x)
x = Dropout(fc_dropout)(x)
x = Activation('relu')(x)
# We project onto number of classes output layer, and squash it with a softmax:
main_output = Dense(num_classes, activation='softmax')(x)
# finally, define the model
model = Model(input=main_input, output=main_output)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
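# Quick sanity check (kept as a comment so the script's behaviour is
# unchanged): the network can be built once with the hyperparameters defined
# above and inspected before starting cross validation.
#
#   model = make_model(maxlen, alphabet_size, embedding_dims, embedding_droupout,
#                      nb_filters, filter_lengths, hidden_dims, fc_dropout,
#                      num_classes)
#   model.summary()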
def train_and_evaluate_model(model, X_train, Y_train, X_test, Y_test, y_test, fold):
# y_test is labels, Y_test is categorical labels
print('Train...')
    stopping = EarlyStopping(monitor='val_loss', patience=10)
model_filename = "cnn_model_gpu_multifilter_fold{}.hdf5".format(fold)
checkpointer = ModelCheckpoint(filepath=model_filename, verbose=1, save_best_only=True)
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
callbacks=[stopping,checkpointer],
verbose=2)
probabilities = model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
acc = accuracy_score(y_test, predictions)
print('Accuracy score (final model): {}'.format(acc))
best_model = load_model(model_filename)
probabilities = best_model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
best_acc = accuracy_score(y_test, predictions)
print('Accuracy score (best model): {}'.format(best_acc))
return best_acc
# run cross validation (based on: https://github.com/fchollet/keras/issues/1711)
skf = StratifiedKFold(y_train, n_folds=n_folds, shuffle=True)
accuracies = []
for k, (train, test) in enumerate(skf):
print("Running fold {}/{}".format(k+1, n_folds))
model = None # clearing the NN
model = make_model(maxlen, alphabet_size, embedding_dims, embedding_droupout,
nb_filters, filter_lengths, hidden_dims, fc_dropout,
num_classes)
acc = train_and_evaluate_model(model, X_train[train], Y_train[train], X_train[test], Y_train[test], y_train[test], k+1)
accuracies.append(acc)
print('Accuracies of all folds:')
print(accuracies)
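# A summary of the cross-validation run could be printed here as well, e.g.
# (kept as a comment so the original output is unchanged):
#   print('Mean accuracy: {:.4f} +/- {:.4f}'.format(np.mean(accuracies), np.std(accuracies)))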
| mit | 8,944,023,813,990,312,000 | 37.209877 | 123 | 0.677868 | false |
catmaid/CATMAID | django/applications/catmaid/control/skeleton.py | 1 | 174650 | # -*- coding: utf-8 -*-
from collections import defaultdict
import csv
from datetime import datetime, timedelta
from itertools import chain
import dateutil.parser
import json
import networkx as nx
import pytz
import re
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple, Union
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseBadRequest, Http404, \
JsonResponse, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.db import connection
from django.db.models import Q
from django.views.decorators.cache import never_cache
from rest_framework.decorators import api_view
from catmaid.control import tracing
from catmaid.models import (Project, UserRole, Class, ClassInstance, Review,
ClassInstanceClassInstance, Relation, Sampler, Treenode,
TreenodeConnector, SamplerDomain, SkeletonSummary, SamplerDomainEnd,
SamplerInterval, SamplerDomainType, SkeletonOrigin, User)
from catmaid.objects import Skeleton, SkeletonGroup, \
compartmentalize_skeletongroup_by_edgecount, \
compartmentalize_skeletongroup_by_confidence
from catmaid.control.authentication import requires_user_role, \
can_edit_class_instance_or_fail, can_edit_or_fail, can_edit_all_or_fail
from catmaid.control.common import (insert_into_log, get_class_to_id_map,
get_relation_to_id_map, _create_relation, get_request_bool,
get_request_list, Echo)
from catmaid.control.link import LINK_TYPES
from catmaid.control.neuron import _delete_if_empty
from catmaid.control.annotation import (annotations_for_skeleton,
create_annotation_query, _annotate_entities, _update_neuron_annotations)
from catmaid.control.provenance import get_data_source, normalize_source_url
from catmaid.control.review import get_review_status
from catmaid.control.tree_util import find_root, reroot, edge_count_to_root
from catmaid.control.volume import get_volume_details
def get_skeleton_permissions(request:HttpRequest, project_id, skeleton_id) -> JsonResponse:
""" Tests editing permissions of a user on a skeleton and returns the
result as JSON object."""
try:
nn = _get_neuronname_from_skeletonid( project_id, skeleton_id )
can_edit = can_edit_class_instance_or_fail(request.user,
nn['neuronid'])
except:
can_edit = False
permissions = {
'can_edit': can_edit,
}
return JsonResponse(permissions)
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def open_leaves(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
"""List open leaf nodes in a skeleton.
Return a list of the ID and location of open leaf nodes in a skeleton,
their path length distance to the specified treenode, and their creation
time.
Leaves are considered open if they are not tagged with a tag matching
a particular regex.
.. note:: This endpoint is used interactively by the client so performance
is critical.
---
parameters:
- name: treenode_id
description: ID of the origin treenode for path length distances
required: true
type: integer
paramType: form
models:
open_leaf_node:
id: open_leaf_node
properties:
- description: ID of an open leaf treenode
type: integer
required: true
- description: Node location
type: array
items:
type: number
format: double
required: true
- description: Distance from the query node
type: number
format: double
required: true
- description: Node creation time
type: string
format: date-time
required: true
type:
- type: array
items:
$ref: open_leaf_node
required: true
"""
treenode_id = int(request.POST['treenode_id'])
nearest, _ = _open_leaves(project_id, skeleton_id, treenode_id)
return JsonResponse(nearest, safe=False)
def _open_leaves(project_id, skeleton_id, tnid=None):
cursor = connection.cursor()
relations = get_relation_to_id_map(project_id, ['labeled_as'])
labeled_as = relations['labeled_as']
    # Select all nodes and their parents
cursor.execute('''
SELECT t.id, t.parent_id
FROM treenode t
WHERE t.skeleton_id = %s
''', (int(skeleton_id),))
    # Create a graph with edges from parent to child.
tree = nx.DiGraph()
n_nodes = 0
for row in cursor.fetchall():
n_nodes += 1
node_id = row[0]
if row[1]:
# It is ok to add edges that already exist: DiGraph doesn't keep duplicates
tree.add_edge(row[1], node_id)
else:
tree.add_node(node_id)
# Default to root node
if not tnid:
tnid = node_id
if tnid not in tree:
raise ValueError("Could not find %s in skeleton %s" % (tnid, int(skeleton_id)))
reroot(tree, tnid)
distances = edge_count_to_root(tree, root_node=tnid)
leaves = set()
for node_id, out_degree in tree.out_degree_iter():
if 0 == out_degree or node_id == tnid and 1 == out_degree:
# Found an end node
leaves.add(node_id)
    # Select the end nodes and their tags
cursor.execute('''
SELECT t.id, t.location_x, t.location_y, t.location_z, t.creation_time, array_agg(ci.name)
FROM treenode t
JOIN UNNEST(%s::bigint[]) AS leaves (tnid)
ON t.id = leaves.tnid
LEFT OUTER JOIN (
treenode_class_instance tci
INNER JOIN class_instance ci
ON tci.class_instance_id = ci.id
AND tci.relation_id = %s)
ON t.id = tci.treenode_id
GROUP BY t.id
''', (list(leaves), labeled_as))
# Iterate end nodes to find which are open.
nearest = []
end_tags = ['uncertain continuation', 'not a branch', 'soma',
r'^(?i)(really|uncertain|anterior|posterior)?\s?ends?$']
end_regex = re.compile('(?:' + ')|(?:'.join(end_tags) + ')')
for row in cursor.fetchall():
node_id = row[0]
tags = row[5]
# Check if not tagged with a tag containing 'end'
if tags == [None] or not any(end_regex.match(s) for s in tags):
# Found an open end
d = distances[node_id]
nearest.append([node_id, (row[1], row[2], row[3]), d, row[4]])
return nearest, n_nodes
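# Shape of the _open_leaves() result, with illustrative values only: each entry
# is [treenode_id, (x, y, z), edge_count_to_query_node, creation_time], e.g.
# [12345, (1020.0, 2310.5, 80.0), 17, datetime(...)], and n_nodes is the total
# number of treenodes in the skeleton.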
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_labels(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
"""List nodes in a skeleton with labels matching a query.
Find all nodes in this skeleton with labels (front-end node tags) matching
a regular expression, sort them by ascending path distance from a treenode
in the skeleton, and return the result.
---
parameters:
- name: treenode_id
description: ID of the origin treenode for path length distances
required: true
type: integer
paramType: form
- name: label_regex
description: Regular expression query to match labels
required: true
type: string
paramType: form
- name: only_leaves
description: Whether to only return information on leaf nodes.
type: boolean
required: false
defaultValue: false
paramType: form
models:
find_labels_node:
id: find_labels_node
properties:
- description: ID of a node with a matching label
type: integer
required: true
- description: Node location
type: array
items:
type: number
format: double
required: true
- description: Path distance from the origin treenode
type: number
format: double
required: true
- description: Labels on this node matching the query
type: array
items:
type: string
required: true
type:
- type: array
items:
$ref: find_labels_node
required: true
"""
tnid = int(request.POST['treenode_id'])
label_regex = str(request.POST['label_regex'])
only_leaves = get_request_bool(request.POST, 'only_leaves', False)
return JsonResponse(_find_labels(project_id, skeleton_id, label_regex, tnid,
only_leaves), safe=False)
def _find_labels(project_id, skeleton_id, label_regex, tnid=None,
only_leaves=False):
cursor = connection.cursor()
if tnid is None:
cursor.execute("""
SELECT id FROM treenode
WHERE skeleton_id = %(skeleton_id)s AND parent_id IS NULL
""")
tnid = cursor.fetchone()[0]
cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='labeled_as'" % int(project_id))
labeled_as = cursor.fetchone()[0]
# Select all nodes in the skeleton and any matching labels
cursor.execute('''
SELECT
t.id,
t.parent_id,
t.location_x,
t.location_y,
t.location_z,
ci.name
FROM treenode t
LEFT OUTER JOIN (
treenode_class_instance tci
INNER JOIN class_instance ci
ON (tci.class_instance_id = ci.id AND tci.relation_id = %s AND ci.name ~ %s))
ON t.id = tci.treenode_id
WHERE t.skeleton_id = %s
''', (labeled_as, label_regex, int(skeleton_id)))
# Some entries repeated, when a node has more than one matching label
# Create a graph with edges from parent to child, and accumulate parents
tree = nx.DiGraph()
for row in cursor.fetchall():
nodeID = row[0]
if row[1]:
# It is ok to add edges that already exist: DiGraph doesn't keep duplicates
tree.add_edge(row[1], nodeID)
else:
tree.add_node(nodeID)
tree.node[nodeID]['loc'] = (row[2], row[3], row[4])
if row[5]:
props = tree.node[nodeID]
tags = props.get('tags')
if tags:
tags.append(row[5])
else:
props['tags'] = [row[5]]
if tnid not in tree:
raise ValueError("Could not find %s in skeleton %s" % (tnid, int(skeleton_id)))
reroot(tree, tnid)
distances = edge_count_to_root(tree, root_node=tnid)
nearest = []
leaves = set()
if only_leaves:
for node_id, out_degree in tree.out_degree_iter():
if 0 == out_degree or node_id == tnid and 1 == out_degree:
# Found an end node
leaves.add(node_id)
for nodeID, props in tree.nodes_iter(data=True):
if only_leaves and nodeID not in leaves:
continue
if 'tags' in props:
# Found a node with a matching label
d = distances[nodeID]
nearest.append([nodeID, props['loc'], d, props['tags']])
nearest.sort(key=lambda n: n[2])
return nearest
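# Shape of the _find_labels() result, with illustrative values only: entries
# are [treenode_id, (x, y, z), edge_count_to_origin, [matching_labels]],
# sorted by ascending distance, e.g. [20117, (500.0, 640.0, 35.0), 3, ['soma']].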
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def within_spatial_distance(request:HttpRequest, project_id=None) -> JsonResponse:
"""Find skeletons within a given L-infinity distance of a treenode.
Returns at most 100 results.
---
parameters:
- name: treenode_id
description: ID of the origin treenode to search around
required: true
type: integer
paramType: form
- name: distance
description: L-infinity distance in nanometers within which to search
required: false
default: 0
type: integer
paramType: form
- name: size_mode
description: |
Whether to return skeletons with only one node in the search area
(1) or more than one node in the search area (0).
required: false
default: 0
type: integer
paramType: form
type:
reached_limit:
description: Whether the limit of at most 100 skeletons was reached
type: boolean
required: true
skeletons:
description: IDs of skeletons matching the search criteria
type: array
required: true
items:
type: integer
"""
project_id = int(project_id)
tnid = request.POST.get('treenode_id', None)
if not tnid:
raise ValueError("Need a treenode!")
tnid = int(tnid)
distance = int(request.POST.get('distance', 0))
if 0 == distance:
return JsonResponse({"skeletons": []})
size_mode = int(request.POST.get("size_mode", 0))
having = ""
if 0 == size_mode:
having = "HAVING count(*) > 1"
elif 1 == size_mode:
having = "HAVING count(*) = 1"
# else, no constraint
cursor = connection.cursor()
cursor.execute('SELECT location_x, location_y, location_z FROM treenode WHERE id=%s' % tnid)
pos = cursor.fetchone()
limit = 100
x0 = pos[0] - distance
x1 = pos[0] + distance
y0 = pos[1] - distance
y1 = pos[1] + distance
z0 = pos[2] - distance
z1 = pos[2] + distance
# Cheap emulation of the distance
cursor.execute('''
SELECT skeleton_id, count(*)
FROM treenode
WHERE project_id = %s
AND location_x > %s
AND location_x < %s
AND location_y > %s
AND location_y < %s
AND location_z > %s
AND location_z < %s
GROUP BY skeleton_id
%s
LIMIT %s
''' % (project_id, x0, x1, y0, y1, z0, z1, having, limit))
skeletons = tuple(row[0] for row in cursor.fetchall())
return JsonResponse({"skeletons": skeletons,
"reached_limit": limit == len(skeletons)})
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_statistics(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
p = get_object_or_404(Project, pk=project_id)
skel = Skeleton( skeleton_id = skeleton_id, project_id = project_id )
const_time = skel.measure_construction_time()
construction_time = f'{int(const_time / 60)} minutes {const_time % 60} seconds'
return JsonResponse({
'node_count': skel.node_count(),
'input_count': skel.input_count(),
'output_count': skel.output_count(),
'presynaptic_sites': skel.presynaptic_sites_count(),
'postsynaptic_sites': skel.postsynaptic_sites_count(),
'cable_length': int(skel.cable_length()),
'measure_construction_time': construction_time,
'percentage_reviewed': "%.2f" % skel.percentage_reviewed()})
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def contributor_statistics(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
return contributor_statistics_multiple(request, project_id=project_id, skeleton_ids=[int(skeleton_id)])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def contributor_statistics_multiple(request:HttpRequest, project_id=None, skeleton_ids=None) -> JsonResponse:
contributors:DefaultDict[Any, int] = defaultdict(int)
n_nodes = 0
# Count the total number of 20-second intervals with at least one treenode in them
n_time_bins = 0
n_review_bins = 0
n_multi_review_bins = 0
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
if not skeleton_ids:
skeleton_ids = tuple(int(v) for k,v in request.POST.items() if k.startswith('skids['))
# Count time bins separately for each skeleton
time_bins = None
last_skeleton_id = None
for row in Treenode.objects.filter(skeleton_id__in=skeleton_ids).order_by('skeleton').values_list('skeleton_id', 'user_id', 'creation_time').iterator():
if last_skeleton_id != row[0]:
if time_bins:
n_time_bins += len(time_bins)
time_bins = set()
last_skeleton_id = row[0]
n_nodes += 1
contributors[row[1]] += 1
time_bins.add(int((row[2] - epoch).total_seconds() / 20))
# Process last one
if time_bins:
n_time_bins += len(time_bins)
def process_reviews(rev):
"""
Take into account that multiple people may have reviewed the same nodes
Therefore measure the time for the user that has the most nodes reviewed,
then add the nodes not reviewed by that user but reviewed by the rest
"""
seen:Set = set()
min_review_bins = set()
multi_review_bins = 0
for reviewer, treenodes in sorted(rev.items(), key=lambda x: len(x[1]), reverse=True):
reviewer_bins = set()
for treenode, timestamp in treenodes.items():
time_bin = int((timestamp - epoch).total_seconds() / 20)
reviewer_bins.add(time_bin)
if not (treenode in seen):
seen.add(treenode)
min_review_bins.add(time_bin)
multi_review_bins += len(reviewer_bins)
#
return len(min_review_bins), multi_review_bins
rev = None
last_skeleton_id = None
review_contributors:DefaultDict[Any, int] = defaultdict(int) # reviewer_id vs count of nodes reviewed
for row in Review.objects.filter(skeleton_id__in=skeleton_ids).order_by('skeleton').values_list('reviewer', 'treenode', 'review_time', 'skeleton_id').iterator():
if last_skeleton_id != row[3]:
if rev:
a, b = process_reviews(rev)
n_review_bins += a
n_multi_review_bins += b
# Reset for next skeleton
rev = defaultdict(dict)
last_skeleton_id = row[3]
#
rev[row[0]][row[1]] = row[2] # type: ignore
#
review_contributors[row[0]] += 1
# Process last one
if rev:
a, b = process_reviews(rev)
n_review_bins += a
n_multi_review_bins += b
relations = {row[0]: row[1] for row in Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id').iterator()}
pre = relations['presynaptic_to']
post = relations['postsynaptic_to']
synapses:Dict = {}
synapses[pre] = defaultdict(int)
synapses[post] = defaultdict(int)
    # Count per-user presynaptic and postsynaptic links for these skeletons.
for row in TreenodeConnector.objects.filter(
Q(relation_id=pre) | Q(relation_id=post),
skeleton_id__in=skeleton_ids
).values_list('user_id', 'relation_id').iterator():
synapses[row[1]][row[0]] += 1
return JsonResponse({
'construction_minutes': int(n_time_bins / 3.0),
'min_review_minutes': int(n_review_bins / 3.0),
'multiuser_review_minutes': int(n_multi_review_bins / 3.0),
'n_nodes': n_nodes,
'node_contributors': contributors,
'n_pre': sum(synapses[relations['presynaptic_to']].values()),
'n_post': sum(synapses[relations['postsynaptic_to']].values()),
'pre_contributors': synapses[relations['presynaptic_to']],
'post_contributors': synapses[relations['postsynaptic_to']],
'review_contributors': review_contributors
})
@requires_user_role(UserRole.Browse)
def node_count(request:HttpRequest, project_id=None, skeleton_id=None, treenode_id=None) -> JsonResponse:
# Works with either the skeleton_id or the treenode_id
p = get_object_or_404(Project, pk=project_id)
if not skeleton_id:
skeleton_id = Treenode.objects.get(pk=treenode_id).skeleton_id
skeleton_id = int(skeleton_id)
return JsonResponse({
'count': SkeletonSummary.objects.get(skeleton_id=skeleton_id).num_nodes,
'skeleton_id': skeleton_id})
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def sampler_count(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
"""Get the number of samplers associated with this skeleton.
---
parameters:
- name: project_id
      description: Project the skeleton is part of
type: integer
paramType: path
required: true
- name: skeleton_id
description: ID of the skeleton to get the sampler count for.
required: true
type: integer
paramType: path
"""
p = get_object_or_404(Project, pk=project_id)
return JsonResponse({
'n_samplers': Sampler.objects.filter(project_id=project_id, skeleton_id=skeleton_id).count(),
})
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def list_sampler_count(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get the number of samplers associated with each skeleton in the passed in
last.
---
parameters:
- name: project_id
      description: Project the skeletons are part of
type: integer
paramType: path
required: true
- name: skeleton_ids
description: IDs of the skeleton to get the sampler count for.
required: true
type: array
items:
type: integer
paramType: path
"""
p = get_object_or_404(Project, pk=project_id)
skeleton_ids = get_request_list(request.POST, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError("Need at least one skeleton ID")
cursor = connection.cursor()
cursor.execute("""
SELECT skeleton.id, count(cs.skeleton_id)
FROM UNNEST(%(skeleton_ids)s::bigint[]) skeleton(id)
LEFT JOIN catmaid_sampler cs
ON cs.skeleton_id = skeleton.id
WHERE project_id = %(project_id)s OR cs.skeleton_id IS NULL
GROUP BY skeleton.id
""", {
'project_id': p.id,
'skeleton_ids': skeleton_ids,
})
return JsonResponse(dict(cursor.fetchall()))
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def cable_length(request:HttpRequest, project_id=None, skeleton_id=None, treenode_id=None) -> JsonResponse:
"""Get the cable length for a skeleton
---
parameters:
- name: project_id
      description: Project to operate in
type: integer
paramType: path
required: true
- name: skeleton_id
description: IDs of the skeleton to get the cable length for
required: true
type: integer
paramType: path
"""
p = get_object_or_404(Project, pk=project_id)
if not skeleton_id:
if treenode_id:
skeleton_id = Treenode.objects.get(pk=treenode_id).skeleton_id
else:
raise ValueError("Need skeleton ID or treenode ID")
skeleton_id = int(skeleton_id)
return JsonResponse({
'cable_length': SkeletonSummary.objects.get(skeleton_id=skeleton_id).cable_length,
'skeleton_id': skeleton_id})
def _get_neuronname_from_skeletonid( project_id, skeleton_id ):
p = get_object_or_404(Project, pk=project_id)
qs = ClassInstanceClassInstance.objects.filter(
relation__relation_name='model_of',
project=p,
class_instance_a=int(skeleton_id)).select_related("class_instance_b")
try:
return {
'neuronname': qs[0].class_instance_b.name,
'neuronid': qs[0].class_instance_b.id
}
except IndexError:
raise ValueError("Couldn't find a neuron linking to a skeleton with " \
"ID %s" % skeleton_id)
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def neuronname(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
return JsonResponse(_get_neuronname_from_skeletonid(project_id, skeleton_id))
def _neuronnames(skeleton_ids, project_id) -> dict:
qs = ClassInstanceClassInstance.objects.filter(
relation__relation_name='model_of',
project=project_id,
class_instance_a__in=skeleton_ids).select_related("class_instance_b").values_list("class_instance_a", "class_instance_b__name")
return dict(qs)
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def neuronnames(request:HttpRequest, project_id=None) -> JsonResponse:
""" Returns a JSON object with skeleton IDs as keys and neuron names as values. """
skeleton_ids = tuple(get_request_list(request.POST, 'skids', map_fn=int))
return JsonResponse(_neuronnames(skeleton_ids, project_id))
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def cable_lengths(request:HttpRequest, project_id=None) -> HttpResponse:
"""Get the cable length of a set of skeletons.
Returns a mapping from skeleton ID to cable length.
---
parameters:
- name: project_id
description: Project to operate in
type: integer
paramType: path
required: true
- name: skeleton_ids[]
description: IDs of the skeletons to query cable-length for
required: true
type: array
items:
type: integer
paramType: form
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Invalid HTTP method: " + request.method)
skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError('Need at least one skeleton ID')
cursor = connection.cursor()
cursor.execute("""
SELECT COALESCE(json_object_agg(css.skeleton_id, css.cable_length), '{}'::json)::text
FROM catmaid_skeleton_summary css
JOIN UNNEST(%(query_skeleton_ids)s::bigint[]) query_skeleton(id)
ON query_skeleton.id = css.skeleton_id
WHERE project_id = %(project_id)s
""", {
'query_skeleton_ids': skeleton_ids,
'project_id': project_id,
})
return HttpResponse(cursor.fetchone()[0], content_type='application/json')
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def validity(request:HttpRequest, project_id=None) -> HttpResponse:
"""Find out if passed skeleton IDs are valid (and represent present
skeletons).
Returns all passed in skeletons that are valid.
---
parameters:
- name: project_id
description: Project of landmark
type: integer
paramType: path
required: true
- name: skeleton_ids[]
description: IDs of the skeletons whose partners to find
required: true
type: array
items:
type: integer
paramType: form
- name: return_invalid
description: Whether or not to return invalid skeleton IDs rather than valid ones.
required: false
type: bool
default: false
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Invalid HTTP method: " + request.method)
skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError('Need at least one skeleton ID')
return_invalid = get_request_bool(data, 'return_invalid', False)
cursor = connection.cursor()
if return_invalid:
cursor.execute("""
SELECT COALESCE(json_agg(query_skeleton.id), '[]'::json)::text
FROM UNNEST(%(query_skeleton_ids)s::bigint[]) query_skeleton(id)
LEFT JOIN catmaid_skeleton_summary css
ON css.skeleton_id = query_skeleton.id
AND css.project_id = %(project_id)s
WHERE css.skeleton_id IS NULL
""", {
'query_skeleton_ids': skeleton_ids,
'project_id': project_id,
})
else:
cursor.execute("""
SELECT COALESCE(json_agg(query_skeleton.id), '[]'::json)::text
FROM UNNEST(%(query_skeleton_ids)s::bigint[]) query_skeleton(id)
JOIN catmaid_skeleton_summary css
ON css.skeleton_id = query_skeleton.id
WHERE project_id = %(project_id)s
""", {
'query_skeleton_ids': skeleton_ids,
'project_id': project_id,
})
return HttpResponse(cursor.fetchone()[0], content_type='application/json')
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def origin_info(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get origin information of a set of skeletons.
---
parameters:
- name: project_id
description: Project to operate in
type: integer
paramType: path
required: true
- name: skeleton_ids[]
description: IDs of skeletons to get origin for
required: true
type: array
items:
type: integer
paramType: form
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Invalid HTTP method: " + request.method)
skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError('Need at least one skeleton ID')
cursor = connection.cursor()
cursor.execute("""
SELECT so.skeleton_id, so.source_id, so.data_source_id
FROM skeleton_origin so
JOIN UNNEST(%(skeleton_ids)s::bigint[]) query(skeleton_id)
ON query.skeleton_id = so.skeleton_id
WHERE so.project_id = %(project_id)s
""", {
'project_id': project_id,
'skeleton_ids': skeleton_ids,
})
origin_rows = dict((c1, {
'source_id': c2,
'data_source_id': c3
}) for c1, c2, c3 in cursor.fetchall())
data_source_ids = set(v['data_source_id'] for v in origin_rows.values())
cursor.execute("""
SELECT data_source.id, name, url, source_project_id
FROM data_source
JOIN UNNEST(%(data_source_ids)s::bigint[]) query(id)
ON query.id = data_source.id
WHERE project_id = %(project_id)s
""", {
'project_id': project_id,
'data_source_ids': list(data_source_ids),
})
data_sources = dict((c1, {
'name': c2,
'url': c3,
'source_project_id': c4
}) for c1, c2, c3, c4 in cursor.fetchall())
return JsonResponse({
'data_sources': data_sources,
'origins': origin_rows,
})
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def from_origin(request:HttpRequest, project_id=None) -> JsonResponse:
"""Find mappings to existing skeletons for potential imports.
---
parameters:
- name: project_id
description: Project to operate in
type: integer
paramType: path
required: true
- name: source_ids[]
description: IDs of the source IDs to query origin for
required: true
type: array
items:
type: integer
paramType: form
- name: source_url
description: Source URL of skeletons
type: string
paramType: path
required: true
- name: source_project_id
description: Source project ID of skeletons
type: integer
paramType: path
required: true
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Invalid HTTP method: " + request.method)
source_ids = get_request_list(data, 'source_ids', map_fn=int)
if not source_ids:
raise ValueError('Need at least one source ID')
source_url = data.get('source_url')
if not source_url:
raise ValueError('Need source_url for origin lookup')
source_url = normalize_source_url(source_url)
source_project_id = data.get('source_project_id')
if source_project_id is None:
raise ValueError("Need source_project_id for origin lookup")
cursor = connection.cursor()
cursor.execute("""
SELECT so.source_id, so.skeleton_id
FROM skeleton_origin so
JOIN UNNEST(%(source_ids)s::bigint[]) query(source_id)
ON query.source_id = so.source_id
JOIN data_source ds
ON ds.id = so.data_source_id
WHERE so.project_id = %(project_id)s
AND ds.project_id = %(project_id)s
AND ds.url = %(source_url)s
AND ds.source_project_id = %(source_project_id)s
""", {
'source_ids': source_ids,
'project_id': project_id,
'source_url': source_url,
'source_project_id': source_project_id,
})
return JsonResponse(dict(cursor.fetchall()))
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def connectivity_counts(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get the number of synapses per type for r a set of skeletons.
    Returns an object with two fields. The first, `connectivity`, is a mapping
from skeleton ID to objects that map a relation ID to connectivity count for
that particular relation. The second field of the returned object,
`relations`, maps relation IDs used in the first field to relation names.
---
parameters:
- name: project_id
        description: Project to work in
type: integer
paramType: path
required: true
- name: count_partner_links
description: Whether to count partner links or links to a connector.
type: boolean
paramType: path
default: true
required: false
- name: source_relations[]
description: A list of pre-connector relations that have to be used
default: []
required: false
type: array
items:
type: string
paramType: form
- name: target_relations[]
description: A list of post-connector relations that have to be used
default: []
required: false
type: array
items:
type: string
paramType: form
- name: skeleton_ids[]
description: IDs of the skeletons whose partners to count
required: true
type: array
items:
type: integer
paramType: form
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Invalid HTTP method: " + request.method)
skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError('Need at least one skeleton ID')
count_partner_links = get_request_bool(data, 'count_partner_links', True)
source_relations = get_request_list(data, 'source_relations', default=[])
target_relations = get_request_list(data, 'target_relations', default=[])
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
source_relation_ids = map(lambda r: relations[r], source_relations)
target_relation_ids = map(lambda r: relations[r], target_relations)
extra_select = []
if count_partner_links:
if target_relation_ids:
extra_target_check = """
AND tc2.relation_id IN ({})
""".format(','.join(map(str, target_relation_ids)))
else:
extra_target_check = ""
extra_select.append(f"""
JOIN treenode_connector tc2
ON tc.connector_id = tc2.connector_id
AND tc.id <> tc2.id
{extra_target_check}
""")
if source_relation_ids:
extra_source_check = """
AND tc.relation_id IN ({})
""".format(','.join(map(str, source_relation_ids)))
else:
extra_source_check = ""
cursor = connection.cursor()
cursor.execute("""
SELECT tc.skeleton_id, tc.relation_id, COUNT(tc)
FROM treenode_connector tc
JOIN UNNEST(%(skeleton_ids)s::bigint[]) skeleton(id)
ON skeleton.id = tc.skeleton_id
{extra_select}
WHERE tc.project_id = %(project_id)s
{extra_source_check}
GROUP BY tc.skeleton_id, tc.relation_id
""".format(**{
'extra_select': '\n'.join(extra_select),
'extra_source_check': extra_source_check,
}), {
'project_id': project_id,
'skeleton_ids': skeleton_ids,
})
connectivity:Dict = {}
seen_relations = set()
for row in cursor.fetchall():
        skeleton_entry = connectivity.get(row[0])
        if not skeleton_entry:
            skeleton_entry = {}
            connectivity[row[0]] = skeleton_entry
        seen_relations.add(row[1])
        skeleton_entry[row[1]] = row[2]
if seen_relations:
relations = dict((v,k) for k,v in relations.items() if v in seen_relations)
else:
relations = {}
return JsonResponse({
'connectivity': connectivity,
'relations': dict(relations),
})
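# Sketch of the connectivity_counts() response body (IDs and counts are
# illustrative only):
#   {
#     "connectivity": {"493": {"1021": 34, "1022": 12}},
#     "relations": {"1021": "presynaptic_to", "1022": "postsynaptic_to"}
#   }
# i.e. per-skeleton link counts keyed by relation ID, plus an ID-to-name map
# for the relations that actually occur.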
def check_annotations_on_split(project_id, skeleton_id, over_annotation_set,
under_annotation_set) -> bool:
""" With respect to annotations, a split is only correct if one part keeps
the whole set of annotations.
"""
# Get current annotation set
annotation_query = create_annotation_query(project_id,
{'skeleton_id': skeleton_id})
# Check if current set is equal to under or over set
current_annotation_set = frozenset(a.name for a in annotation_query)
if not current_annotation_set.difference(over_annotation_set):
return True
if not current_annotation_set.difference(under_annotation_set):
return True
return False
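# Example of the rule enforced above (annotation names are hypothetical): if
# the skeleton currently carries {'glomerulus A', 'PN'}, a split is valid when
# the over set is {'glomerulus A', 'PN'} and the under set is {'PN'} (one part
# keeps everything), but invalid when the sets are {'glomerulus A'} and {'PN'}
# (neither part keeps the full set).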
def check_new_annotations(project_id, user, entity_id, annotation_set) -> bool:
""" With respect to annotations, the new annotation set is only valid if the
user doesn't remove annotations for which (s)he has no permissions.
"""
# Get current annotation links
annotation_links = ClassInstanceClassInstance.objects.filter(
project_id=project_id,
class_instance_b__class_column__class_name='annotation',
relation__relation_name='annotated_with',
class_instance_a_id=entity_id).values_list(
'class_instance_b__name', 'id', 'user')
# Build annotation name indexed dict to the link's id and user
annotations = {link[0]:(link[1], link[2]) for link in annotation_links}
current_annotation_set = frozenset(annotations.keys())
# If the current annotation set is not included completely in the new
# set, we have to check if the user has permissions to edit the missing
# annotations.
removed_annotations = current_annotation_set - annotation_set
for rl in removed_annotations:
try:
can_edit_or_fail(user, annotations[rl][0],
'class_instance_class_instance')
except:
return False
# Otherwise, everything is fine
return True
def check_annotations_on_join(project_id, user, from_neuron_id, to_neuron_id,
ann_set) -> bool:
""" With respect to annotations, a join is only correct if the user doesn't
remove annotations for which (s)he has no permissions.
"""
return check_new_annotations(project_id, user, from_neuron_id, ann_set) and \
check_new_annotations(project_id, user, to_neuron_id, ann_set)
@requires_user_role(UserRole.Annotate)
def split_skeleton(request:HttpRequest, project_id=None) -> JsonResponse:
""" The split is only possible if the neuron is not locked or if it is
locked by the current user or if the current user belongs to the group of
the user who locked it. Of course, the split is also possible if the
current user is a super-user. Also, all reviews of the treenodes in the new
neuron are updated to refer to the new skeleton.
If upstream_annotation_map or downstream_annotation_map are not defined,
this is interpreted as keeping all annotations for the respective skeleton.
"""
treenode_id = int(request.POST['treenode_id'])
treenode = Treenode.objects.get(pk=treenode_id)
skeleton_id = treenode.skeleton_id
project_id = int(project_id)
upstream_annotation_map = request.POST.get('upstream_annotation_map')
downstream_annotation_map = request.POST.get('downstream_annotation_map')
cursor = connection.cursor()
    # If no annotation map was specified for either the upstream or the
    # downstream part, get the current annotation data.
if not upstream_annotation_map or not downstream_annotation_map:
current_annotations = annotations_for_skeleton(project_id, skeleton_id)
if upstream_annotation_map:
upstream_annotation_map = json.loads(upstream_annotation_map)
else:
upstream_annotation_map = current_annotations
if downstream_annotation_map:
downstream_annotation_map = json.loads(downstream_annotation_map)
else:
downstream_annotation_map = current_annotations
# Check if the treenode is root!
if not treenode.parent:
raise ValueError('Can\'t split at the root node: it doesn\'t have a parent.')
treenode_parent = treenode.parent
# Check if annotations are valid
if not check_annotations_on_split(project_id, skeleton_id,
frozenset(upstream_annotation_map.keys()),
frozenset(downstream_annotation_map.keys())):
raise ValueError("Annotation distribution is not valid for splitting. " \
"One part has to keep the whole set of annotations!")
skeleton = ClassInstance.objects.select_related('user').get(pk=skeleton_id)
# retrieve neuron of this skeleton
neuron = ClassInstance.objects.get(
cici_via_b__relation__relation_name='model_of',
cici_via_b__class_instance_a_id=skeleton_id)
# Make sure the user has permissions to edit
can_edit_class_instance_or_fail(request.user, neuron.id, 'neuron')
# Extend annotation maps with creation time and edition time of the link to
# neuron to make sure these dates won't change during the split.
upstream_annotation_map = make_annotation_map(upstream_annotation_map, neuron.id, cursor)
downstream_annotation_map = make_annotation_map(downstream_annotation_map, neuron.id, cursor)
# Retrieve the id, parent_id of all nodes in the skeleton. Also
# pre-emptively lock all treenodes and connectors in the skeleton to prevent
# race conditions resulting in inconsistent skeleton IDs from, e.g., node
# creation or update.
cursor.execute('''
SELECT 1 FROM treenode_connector tc WHERE tc.skeleton_id = %s
ORDER BY tc.id
FOR NO KEY UPDATE OF tc;
SELECT t.id, t.parent_id FROM treenode t WHERE t.skeleton_id = %s
ORDER BY t.id
FOR NO KEY UPDATE OF t
''', (skeleton_id, skeleton_id)) # no need to sanitize
# build the networkx graph from it
graph = nx.DiGraph()
for row in cursor.fetchall():
graph.add_node( row[0] )
if row[1]:
# edge from parent_id to id
graph.add_edge( row[1], row[0] )
# find downstream nodes starting from target treenode_id
# and generate the list of IDs to change, starting at treenode_id (inclusive)
change_list = nx.bfs_tree(graph, treenode_id).nodes()
if not change_list:
# When splitting an end node, the bfs_tree doesn't return any nodes,
        # which is surprising, because when the split-off tree has 2 or more nodes
# the node at which the split is made is included in the list.
change_list.append(treenode_id)
# create a new skeleton
new_skeleton = ClassInstance()
new_skeleton.name = 'Skeleton'
new_skeleton.project_id = project_id
new_skeleton.user = skeleton.user # The same user that owned the skeleton to split
new_skeleton.class_column = Class.objects.get(class_name='skeleton', project_id=project_id)
new_skeleton.save()
new_skeleton.name = f'Skeleton {new_skeleton.id}' # This could be done with a trigger in the database
new_skeleton.save()
# Create new neuron
new_neuron = ClassInstance()
new_neuron.name = 'Neuron'
new_neuron.project_id = project_id
new_neuron.user = skeleton.user
new_neuron.class_column = Class.objects.get(class_name='neuron',
project_id=project_id)
new_neuron.save()
new_neuron.name = 'Neuron %s' % str(new_neuron.id)
new_neuron.save()
# Assign the skeleton to new neuron
cici = ClassInstanceClassInstance()
cici.class_instance_a = new_skeleton
cici.class_instance_b = new_neuron
cici.relation = Relation.objects.get(relation_name='model_of', project_id=project_id)
cici.user = skeleton.user # The same user that owned the skeleton to split
cici.project_id = project_id
cici.save()
# Update skeleton IDs for treenodes, treenode_connectors, and reviews.
# TODO: No need to duplicate the change_list array three times in the query.
# Use a CTE
cursor.execute("""
UPDATE treenode
SET skeleton_id = %(new_skeleton_id)s
WHERE id = ANY(%(change_list)s::bigint[]);
UPDATE treenode_connector
SET skeleton_id = %(new_skeleton_id)s
WHERE treenode_id = ANY(%(change_list)s::bigint[]);
UPDATE review
SET skeleton_id = %(new_skeleton_id)s
WHERE treenode_id = ANY(%(change_list)s::bigint[]);
""", {'new_skeleton_id': new_skeleton.id, 'change_list': change_list})
# setting new root treenode's parent to null
Treenode.objects.filter(id=treenode_id).update(parent=None, editor=request.user)
# Update annotations of existing neuron to have only over set
if upstream_annotation_map:
_update_neuron_annotations(project_id, neuron.id,
upstream_annotation_map)
# Update annotations of under skeleton
_annotate_entities(project_id, [new_neuron.id], downstream_annotation_map)
# If samplers reference this skeleton, make sure they are updated as well
sampler_info = prune_samplers(skeleton_id, graph, treenode_parent, treenode)
# Log the location of the node at which the split was done
location = (treenode.location_x, treenode.location_y, treenode.location_z)
insert_into_log(project_id, request.user.id, "split_skeleton", location,
f"Split skeleton with ID {skeleton_id} (neuron: {neuron.name})")
response = {
'new_skeleton_id': new_skeleton.id,
'existing_skeleton_id': skeleton_id,
'x': treenode.location_x,
'y': treenode.location_y,
'z': treenode.location_z,
}
if sampler_info and sampler_info['n_samplers'] > 0:
response['samplers'] = {
'n_deleted_intervals': sampler_info['n_deleted_intervals'],
'n_deleted_domains': sampler_info['n_deleted_domains'],
}
return JsonResponse(response)
def create_subgraph(source_graph, target_graph, start_node, end_nodes) -> None:
"""Extract a subgraph out of <source_graph> into <target_graph>.
"""
working_set = [start_node]
# Create a graph for the domain
while working_set:
current_node = working_set.pop(0)
for n in source_graph.successors_iter(current_node):
target_graph.add_path([current_node,n])
if n not in end_nodes:
working_set.append(n)
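# Note on create_subgraph(): edges in <source_graph> run from parent to child
# (that is how split_skeleton builds its graph), so the traversal above copies
# the subtree rooted at <start_node> into <target_graph>, without descending
# past nodes listed in <end_nodes>. For example, with edges 1->2, 2->3 and
# 2->4, start_node=2 and end_nodes={4}, the result contains exactly the edges
# 2->3 and 2->4.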
def prune_samplers(skeleton_id, graph, treenode_parent, treenode):
"""Handle samplers and their domains and intervals during a skeleton split.
skeleton_id: the skeleton to work with
graph: the networkx DiGraph of the skeleton, directed from parent TO child
(!). This is different from the database model.
treenode_parent: the parent node of the split node
treenode: the split node
"""
samplers = Sampler.objects.prefetch_related('samplerdomain_set',
'samplerdomain_set__samplerdomainend_set',
'samplerdomain_set__samplerinterval_set').filter(skeleton_id=skeleton_id)
n_samplers = len(samplers)
# Return early if there are no samplers
if not n_samplers:
return None
n_deleted_sampler_intervals = 0
n_deleted_sampler_domains = 0
for sampler in samplers:
# Each sampler references the skeleton through domains and
# intervals. If the split off part intersects with the domain, the
# domain needs to be adjusted.
for domain in sampler.samplerdomain_set.all():
domain_ends = domain.samplerdomainend_set.all()
domain_end_map = dict(map(lambda de: (de.end_node_id, de.id), domain_ends))
domain_end_ids = set(domain_end_map.keys())
# Construct a graph for the domain and split it too.
domain_graph = nx.DiGraph()
create_subgraph(graph, domain_graph, domain.start_node_id, domain_end_ids)
# If the subgraph is empty, this domain doesn't intersect with
# the split off part. Therefore, this domain needs no update.
if domain_graph.size() == 0:
continue
            # If the node where the split happens isn't part of this domain
            # subgraph, there are two cases: 1. The treenode is downstream of
            # the domain or 2. the treenode is upstream of the domain. In case
            # 1, this domain will not be affected by the split, because the
            # split-off part is entirely downstream of this domain. In case 2,
            # this domain is entirely on the split-off fragment and could be
            # moved to the split-off part; for now it is deleted for
            # consistency.
if treenode.id not in domain_graph:
# Try to find path from the treenode to a domain node, or a
# common ancestor. If it the domain node is upstream, we can get
# the distance. Otherwise an exception is raised. Note: this is
# a directed graph and in this particular case we expect edges
# to go from parent nodes to child nodes.
if nx.has_path(graph, treenode.id, domain.start_node_id):
# Case 2, the split is upstream of the domain. Delete this
# domain. TODO: Move it to the other split-off fragment.
domain_intervals = SamplerInterval.objects.filter(domain=domain)
n_deleted_sampler_intervals += domain_intervals.count()
n_deleted_sampler_domains += 1
domain_intervals.delete()
domain.delete()
continue
else:
# Case 1, the split is downstream of the domain or on
# another branch. We can ignore this domain.
continue
else:
new_sk_domain_nodes = set(nx.bfs_tree(domain_graph, treenode.id).nodes())
# At this point, we expect some intersecting domain nodes to be there.
if not new_sk_domain_nodes:
raise ValueError("Could not find any split-off domain nodes")
# Remove all explicit domain ends in split-off part. If the
# split off part leaves a branch, add a new domain end for it to
# remaining domain.
            ends_to_remove = list(filter(lambda nid: nid in new_sk_domain_nodes, domain_end_ids))
if ends_to_remove:
domain_end_ids = set(map(lambda x: domain_end_map[x], ends_to_remove)) # type: ignore
SamplerDomainEnd.objects.filter(domain_id__in=domain_end_ids).delete()
if treenode_parent.parent_id is not None and \
domain_graph.has_node(treenode_parent.parent_id) and \
len(nx.algorithms.dag.descendants(domain_graph, treenode_parent.parent_id)) > 1:
new_domain_end = SamplerDomainEnd.objects.create(
domain=domain, end_node=treenode_parent)
# Delete domain intervals that intersect with split-off part
domain_intervals = domain.samplerinterval_set.all()
intervals_to_delete = []
for di in domain_intervals:
start_split_off = di.start_node_id in new_sk_domain_nodes
end_split_off = di.end_node_id in new_sk_domain_nodes
# If neither start or end of the interval are in split-off
# part, the interval can be ignored.
if not start_split_off and not end_split_off:
continue
# If both start and end node are split off, the whole
# interval can be removed, because the interval is
# completely in the split off part.
elif start_split_off and end_split_off:
intervals_to_delete.append(di.id)
# If the start is not split off, but the end is, the
# interval crosses the split location and has to be
# adjusted. If this makes the start and end of the remaining
# part the same, the interval is deleted, too.
elif not start_split_off and end_split_off:
if di.start_node_id == treenode_parent.id:
intervals_to_delete.append(di.id)
else:
di.end_node_id = treenode_parent.id
di.save()
# If the start is split off and the end is not, something is
# wrong and we raise an error
else:
raise ValueError("Unexpected interval: start is split "
"off, end is not")
if intervals_to_delete:
SamplerInterval.objects.filter(id__in=intervals_to_delete).delete()
n_deleted_sampler_intervals += len(intervals_to_delete)
# If the domain doesn't have any intervals left after this, it
# can be removed as well.
if len(intervals_to_delete) > 0 and \
len(domain_intervals) == len(intervals_to_delete):
domain.delete()
n_deleted_sampler_domains += 1
return {
'n_samplers': len(samplers),
'n_deleted_domains': n_deleted_sampler_domains,
'n_deleted_intervals': n_deleted_sampler_intervals,
}
@api_view(['GET'])
@never_cache
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def root_for_skeleton(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
"""Retrieve ID and location of the skeleton's root treenode.
---
type:
root_id:
type: integer
required: true
x:
type: number
format: double
required: true
y:
type: number
format: double
required: true
z:
type: number
format: double
required: true
"""
tn = Treenode.objects.get(
project=project_id,
parent__isnull=True,
skeleton_id=skeleton_id)
return JsonResponse({
'root_id': tn.id,
'x': tn.location_x,
'y': tn.location_y,
'z': tn.location_z})
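# Illustrative only: a hypothetical request/response pair for the endpoint
# above (the URL pattern is assumed, not taken from this file; values are
# made up):
#
#   GET /{project_id}/skeletons/{skeleton_id}/root
#   -> {"root_id": 42, "x": 1230.5, "y": 640.0, "z": 225.0}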
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_ancestry(request:HttpRequest, project_id=None) -> JsonResponse:
# All of the values() things in this function can be replaced by
# prefetch_related when we upgrade to Django 1.4 or above
skeleton_id = int(request.POST.get('skeleton_id', None))
if skeleton_id is None:
raise ValueError('A skeleton id has not been provided!')
relation_map = get_relation_to_id_map(project_id)
for rel in ['model_of', 'part_of']:
if rel not in relation_map:
raise ValueError('Failed to find the required relation %s' % rel)
response_on_error = ''
try:
response_on_error = 'The search query failed.'
neuron_rows = ClassInstanceClassInstance.objects.filter(
class_instance_a=skeleton_id,
relation=relation_map['model_of']).values(
'class_instance_b',
'class_instance_b__name')
neuron_count = neuron_rows.count()
if neuron_count == 0:
raise ValueError('No neuron was found that the skeleton %s models' % skeleton_id)
elif neuron_count > 1:
raise ValueError('More than one neuron was found that the skeleton %s models' % skeleton_id)
parent_neuron = neuron_rows[0]
ancestry = []
ancestry.append({
'name': parent_neuron['class_instance_b__name'],
'id': parent_neuron['class_instance_b'],
'class': 'neuron'})
# Doing this query in a loop is horrible, but it should be very rare
# for the hierarchy to be more than 4 deep or so. (This is a classic
# problem of not being able to do recursive joins in pure SQL.)
# Detects erroneous cyclic hierarchy.
current_ci = parent_neuron['class_instance_b']
seen = set([current_ci])
while True:
response_on_error = 'Could not retrieve parent of class instance %s' % current_ci
parents = ClassInstanceClassInstance.objects.filter(
class_instance_a=current_ci,
relation=relation_map['part_of']).values(
'class_instance_b__name',
'class_instance_b',
'class_instance_b__class_column__class_name')
parent_count = parents.count()
if parent_count == 0:
break # We've reached the top of the hierarchy.
elif parent_count > 1:
raise ValueError('More than one class_instance was found that the class_instance %s is part_of.' % current_ci)
else:
parent = parents[0]
ancestry.append({
'name': parent['class_instance_b__name'],
'id': parent['class_instance_b'],
'class': parent['class_instance_b__class_column__class_name']
})
current_ci = parent['class_instance_b']
if current_ci in seen:
raise Exception('Cyclic hierarchy detected for skeleton #%s' % skeleton_id)
return JsonResponse(ancestry, safe=False)
except Exception as e:
raise Exception(response_on_error + ':' + str(e))
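# Sketch of the ancestry list returned above, ordered from the modeled neuron
# up to the root of the object hierarchy (IDs and names are made up):
#
#   [
#     {"name": "my neuron", "id": 123, "class": "neuron"},
#     {"name": "Fragments", "id": 7, "class": "group"},
#     {"name": "root", "id": 1, "class": "root"},
#   ]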
def _connected_skeletons(skeleton_ids, op, relation_id_1, relation_id_2,
model_of_id, cursor, with_nodes=False):
def newSynapseCounts():
return [0, 0, 0, 0, 0]
class Partner:
def __init__(self):
self.num_nodes = 0
self.skids:DefaultDict[Any, List[int]] = defaultdict(newSynapseCounts) # skid vs synapse count
if with_nodes:
self.links:List = []
# Dictionary of partner skeleton ID vs Partner
def newPartner():
return Partner()
partners:DefaultDict[Any, Partner] = defaultdict(newPartner)
# Obtain the synapses made by all skeleton_ids considering the desired
# direction of the synapse, as specified by relation_id_1 and relation_id_2:
cursor.execute('''
SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence),
t1.treenode_id, t2.treenode_id, t1.connector_id
FROM treenode_connector t1,
treenode_connector t2
WHERE t1.skeleton_id = ANY(%s::bigint[])
AND t1.relation_id = %s
AND t1.connector_id = t2.connector_id
AND t1.id != t2.id
AND t2.relation_id = %s
''', (list(skeleton_ids), int(relation_id_1), int(relation_id_2)))
# Sum the number of synapses
for srcID, partnerID, confidence, tn1, tn2, connector_id in cursor.fetchall():
partner = partners[partnerID]
partner.skids[srcID][confidence - 1] += 1
if with_nodes:
partner.links.append([tn1, tn2, srcID, connector_id])
# There may not be any synapses
if not partners:
return partners, []
# If op is AND, discard entries where only one of the skids has synapses
if len(skeleton_ids) > 1 and 'AND' == op:
for partnerID in list(partners.keys()): # list() makes a copy so entries can be deleted while iterating
if len(skeleton_ids) != len(partners[partnerID].skids):
del partners[partnerID]
# With AND it is possible that no common partners exist
if not partners:
return partners, []
# Obtain unique partner skeletons
partner_skids = list(partners.keys())
# Count nodes of each partner skeleton
cursor.execute('''
SELECT skeleton_id, num_nodes
FROM catmaid_skeleton_summary
WHERE skeleton_id = ANY(%s::bigint[])
''', (partner_skids,))
for row in cursor.fetchall():
partners[row[0]].num_nodes = row[1]
# Find which reviewers have reviewed any partner skeletons
cursor.execute('''
SELECT DISTINCT reviewer_id
FROM review
WHERE skeleton_id = ANY(%s::bigint[])
''', (partner_skids,))
reviewers = [row[0] for row in cursor]
return partners, reviewers
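# Rough shape of the data built above (values are illustrative only): partners
# maps a partner skeleton ID to a Partner whose 'skids' field counts synapses
# per source skeleton, binned by confidence 1-5.
#
#   partners[partner_skid].skids[source_skid] == [0, 0, 1, 4, 2]
#   partners[partner_skid].num_nodes == 371
#   reviewers == [reviewer_user_id, ...]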
def _skeleton_info_raw(project_id, skeletons, op, with_nodes=False,
allowed_link_types=None):
cursor = connection.cursor()
# Obtain the IDs of the 'presynaptic_to', 'postsynaptic_to' and 'model_of' relations
relation_ids = get_relation_to_id_map(project_id)
def prepare(partners):
for partnerID in list(partners.keys()): # list() allows deletion while iterating
partner = partners[partnerID]
# jsonize: replace the class instance with a dict of its members,
# dropping partners without any synapse counts
if partner.skids:
partners[partnerID] = partner.__dict__
else:
del partners[partnerID]
return partners
skeleton_info = {}
for link_type in LINK_TYPES:
partner_reference = link_type['partner_reference']
if allowed_link_types and partner_reference not in allowed_link_types:
continue
connectivity, reviews = _connected_skeletons(skeletons, op,
relation_ids[link_type['relation']],
relation_ids[link_type['partner_relation']],
relation_ids['model_of'], cursor, with_nodes)
skeleton_info[partner_reference] = prepare(connectivity)
skeleton_info[partner_reference + '_reviewers'] = reviews
return skeleton_info
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_info_raw(request:HttpRequest, project_id=None) -> JsonResponse:
"""Retrieve a list of down/up-stream partners of a set of skeletons.
From a queried set of source skeletons, find all upstream and downstream
partners, the number of synapses between each source and each partner,
and a list of reviewers for each partner set. Confidence distributions for
each synapse count are included. Optionally find only those partners
that are common between the source skeleton set.
---
parameters:
- name: source_skeleton_ids
description: IDs of the skeletons whose partners to find
required: true
type: array
items:
type: integer
paramType: form
- name: boolean_op
description: |
Whether to find partners of any source skeleton ("OR") or partners
common to all source skeletons ("AND")
required: true
type: string
paramType: form
- name: with_nodes
description: |
Whether to return detailed connectivity information that includes
partner sites.
required: false
type: boolean
paramType: form
default: false
- name: link_types
description: |
A list of allowed link types: incoming, outgoing, abutting,
gapjunction, tightjunction, desmosome, attachment, close_object.
type: array
items:
type: string
required: false
defaultValue: [incoming, outgoing]
models:
skeleton_info_raw_partners:
id: skeleton_info_raw_partners
properties:
'{skeleton_id}':
$ref: skeleton_info_raw_partner
description: Map from partners' skeleton IDs to their information
required: true
skeleton_info_raw_partner:
id: skeleton_info_raw_partner
properties:
skids:
$ref: skeleton_info_raw_partner_counts
required: true
num_nodes:
description: The number of treenodes in this skeleton
required: true
type: integer
skeleton_info_raw_partner_counts:
id: skeleton_info_raw_partner_counts
properties:
'{skeleton_id}':
$ref: skeleton_info_raw_partner_count
description: |
Synapse counts between the partner and the source skeleton with
this ID
required: true
skeleton_info_raw_partner_count:
id: skeleton_info_raw_partner_count
properties:
- description: Number of synapses with confidence 1
type: integer
required: true
- description: Number of synapses with confidence 2
type: integer
required: true
- description: Number of synapses with confidence 3
type: integer
required: true
- description: Number of synapses with confidence 4
type: integer
required: true
- description: Number of synapses with confidence 5
type: integer
required: true
type:
incoming:
$ref: skeleton_info_raw_partners
description: Upstream synaptic partners
required: true
outgoing:
$ref: skeleton_info_raw_partners
description: Downstream synaptic partners
required: true
gapjunctions:
$ref: skeleton_info_raw_partners
description: Gap junction partners
required: true
incoming_reviewers:
description: IDs of reviewers who have reviewed any upstream partners.
required: true
type: array
items:
type: integer
outgoing_reviewers:
description: IDs of reviewers who have reviewed any downstream partners.
required: true
type: array
items:
type: integer
gapjunctions_reviewers:
description: IDs of reviewers who have reviewed any gap junction partners.
required: true
type: array
items:
type: integer
"""
# sanitize arguments
project_id = int(project_id)
skeleton_ids = get_request_list(request.POST, 'source_skeleton_ids', map_fn=int)
op = str(request.POST.get('boolean_op', "OR")).upper() # values: AND, OR
if op not in ("AND", "OR"):
raise ValueError("boolean_op should be 'AND' or 'OR'")
with_nodes = get_request_bool(request.POST, 'with_nodes', False)
allowed_link_types = get_request_list(
request.POST, 'link_types', ['incoming', 'outgoing']
)
skeleton_info = _skeleton_info_raw(
project_id, skeleton_ids, op, with_nodes, allowed_link_types
)
return JsonResponse(skeleton_info)
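# Illustrative only: a hypothetical form-encoded POST body for this endpoint
# (IDs are made up, not a documented invocation from this file):
#
#   source_skeleton_ids[0]=123&source_skeleton_ids[1]=456&boolean_op=AND&with_nodes=true
#
# The response then contains 'incoming', 'outgoing' and the corresponding
# '*_reviewers' entries as described in the docstring above.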
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def connectivity_matrix(request:HttpRequest, project_id=None) -> JsonResponse:
"""
Return a sparse connectivity matrix representation for the given skeleton
IDs. The returned dictionary has a key for each row skeleton having
outgoing connections to one or more column skeletons. Each entry stores a
dictionary that maps the connection partners to the individual outgoing
synapse counts.
---
parameters:
- name: project_id
description: Project of skeletons
type: integer
paramType: path
required: true
- name: rows
description: IDs of row skeletons
required: true
type: array
items:
type: integer
paramType: form
- name: columns
description: IDs of column skeletons
required: true
type: array
items:
type: integer
paramType: form
- name: with_locations
description: Whether or not to return locations of connectors
required: false
default: false
type: boolean
paramType: form
"""
# sanitize arguments
project_id = int(project_id)
rows = tuple(get_request_list(request.POST, 'rows', [], map_fn=int))
cols = tuple(get_request_list(request.POST, 'columns', [], map_fn=int))
with_locations = get_request_bool(request.POST, 'with_locations', False)
matrix = get_connectivity_matrix(project_id, rows, cols,
with_locations=with_locations)
return JsonResponse(matrix)
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def connectivity_matrix_csv(request:HttpRequest, project_id) -> StreamingHttpResponse:
"""
Return a CSV file that represents the connectivity matrix of a set of row
skeletons and a set of column skeletons.
---
parameters:
- name: project_id
description: Project of skeletons
type: integer
paramType: path
required: true
- name: rows
description: IDs of row skeletons
required: true
type: array
items:
type: integer
paramType: form
- name: columns
description: IDs of column skeletons
required: true
type: array
items:
type: integer
paramType: form
- name: names
description: |
An optional mapping of skeleton IDs versus names.
Represented as a list of two-element lists. Each inner list
follows the form [<skeleton-id>, <name>].
required: false
type: array
items:
type: string
"""
# sanitize arguments
project_id = int(project_id)
rows = tuple(get_request_list(request.POST, 'rows', [], map_fn=int))
cols = tuple(get_request_list(request.POST, 'columns', [], map_fn=int))
names:Dict = dict(map(lambda x: (int(x[0]), x[1]), get_request_list(request.POST, 'names', [])))
matrix = get_connectivity_matrix(project_id, rows, cols)
csv_data = []
header = [''] + list(map(lambda x: names.get(x, x), cols))
csv_data.append(header)
for n, skid_a in enumerate(rows):
# Add row header skeleton ID
row = [names.get(skid_a, skid_a)]
csv_data.append(row)
# Add connectivity information
for m, skid_b in enumerate(cols):
p = matrix.get(skid_a, {})
c = p.get(skid_b, 0)
row.append(c)
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer, quoting=csv.QUOTE_NONNUMERIC)
response = StreamingHttpResponse((writer.writerow(row) for row in csv_data), # type: ignore
content_type='text/csv')
filename = 'catmaid-connectivity-matrix.csv'
response['Content-Disposition'] = f'attachment; filename={filename}'
return response
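# Sketch of the CSV produced above for rows [1, 2] and columns [3, 4]
# (names and synapse counts are made up; strings are quoted because of
# QUOTE_NONNUMERIC):
#
#   "","neuron 3","neuron 4"
#   "neuron 1",5,0
#   "neuron 2",2,7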
def get_connectivity_matrix(project_id, row_skeleton_ids, col_skeleton_ids,
with_locations=False) -> DefaultDict[Any, Dict]:
"""
Return a sparse connectivity matrix representation for the given skeleton
IDs. The returned dictionary has a key for each row skeleton having
outgoing connections to one or more column skeletons. Each entry stores a
dictionary that maps the connection partners to the individual outgoing
synapse counts.
"""
cursor = connection.cursor()
relation_map = get_relation_to_id_map(project_id)
post_rel_id = relation_map['postsynaptic_to']
pre_rel_id = relation_map['presynaptic_to']
if with_locations:
extra_select = ', c.id, c.location_x, c.location_y, c.location_z'
extra_join = 'JOIN connector c ON c.id = t2.connector_id'
else:
extra_select = ''
extra_join = ''
# Obtain all synapses made between row skeletons and column skeletons.
cursor.execute('''
SELECT t1.skeleton_id, t2.skeleton_id
{extra_select}
FROM treenode_connector t1,
treenode_connector t2
{extra_join}
WHERE t1.skeleton_id = ANY(%(row_skeleton_ids)s::bigint[])
AND t2.skeleton_id = ANY(%(col_skeleton_ids)s::bigint[])
AND t1.connector_id = t2.connector_id
AND t1.relation_id = %(pre_rel_id)s
AND t2.relation_id = %(post_rel_id)s
'''.format(extra_select=extra_select, extra_join=extra_join), {
'row_skeleton_ids': list(row_skeleton_ids),
'col_skeleton_ids': list(col_skeleton_ids),
'pre_rel_id': pre_rel_id,
'post_rel_id': post_rel_id
})
# Build a sparse connectivity representation. For all skeletons requested
# map a dictionary of partner skeletons and the number of synapses
# connecting to each partner. If locations should be returned as well, an
# object with the fields 'count' and 'locations' is returned instead of a
# single count.
if with_locations:
outgoing:DefaultDict[Any, Dict] = defaultdict(dict)
for r in cursor.fetchall():
source, target = r[0], r[1]
mapping = outgoing[source]
connector_id = r[2]
info = mapping.get(target)
if not info:
info = { 'count': 0, 'locations': {} }
mapping[target] = info
count = info['count']
info['count'] = count + 1
if connector_id not in info['locations']:
location = [r[3], r[4], r[5]]
info['locations'][connector_id] = {
'pos': location,
'count': 1,
}
else:
info['locations'][connector_id]['count'] += 1
else:
outgoing = defaultdict(dict)
for r in cursor.fetchall():
source, target = r[0], r[1]
mapping = outgoing[source]
count = mapping.get(target, 0)
mapping[target] = count + 1
return outgoing
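# Rough shape of the mapping returned above (IDs and counts are made up):
#
#   without locations: {101: {202: 3, 203: 1}}
#   with locations:    {101: {202: {'count': 3, 'locations': {
#                          5001: {'pos': [x, y, z], 'count': 2}, ...}}}}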
@api_view(['POST'])
@requires_user_role([UserRole.Browse, UserRole.Annotate])
def review_status(request:HttpRequest, project_id=None) -> JsonResponse:
"""Retrieve the review status for a collection of skeletons.
The review status for each skeleton in the request is a tuple of total
nodes and number of reviewed nodes (integers). The reviews of only
certain users or a reviewer team may be counted instead of all reviews.
---
parameters:
- name: skeleton_ids[]
description: IDs of the skeletons to retrieve.
required: true
type: array
items:
type: integer
paramType: form
- name: whitelist
description: |
ID of the user whose reviewer team to use to filter reviews
(exclusive to user_ids)
type: integer
paramType: form
- name: user_ids[]
description: |
IDs of the users whose reviews should be counted (exclusive
to whitelist)
type: array
items:
type: integer
paramType: form
models:
review_status_tuple:
id: review_status_tuple
properties:
- description: Total number of treenodes in the skeleton
type: integer
required: true
- description: |
Number of reviewed treenodes in the skeleton matching filters
(if any)
type: integer
required: true
type:
'{skeleton_id}':
$ref: review_status_tuple
required: true
"""
skeleton_ids = set(int(v) for k,v in request.POST.items() if k.startswith('skeleton_ids['))
whitelist = get_request_bool(request.POST, 'whitelist', False)
whitelist_id = None
user_ids = None
if whitelist:
whitelist_id = request.user.id
else:
user_ids = set(int(v) for k,v in request.POST.items() if k.startswith('user_ids['))
status = get_review_status(skeleton_ids, project_id=project_id,
whitelist_id=whitelist_id, user_ids=user_ids)
return JsonResponse(status)
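# Illustrative only: a hypothetical form-encoded request and reply for the
# endpoint above (numbers are made up):
#
#   skeleton_ids[0]=123&skeleton_ids[1]=456
#   -> {"123": [350, 120], "456": [80, 80]}   # [total nodes, reviewed nodes]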
@requires_user_role(UserRole.Annotate)
def reroot_skeleton(request:HttpRequest, project_id=None) -> JsonResponse:
""" Any user with an Annotate role can reroot any skeleton.
"""
treenode_id = request.POST.get('treenode_id', None)
treenode = _reroot_skeleton(treenode_id, project_id)
response_on_error = ''
try:
if treenode:
response_on_error = 'Failed to log reroot.'
location = (treenode.location_x, treenode.location_y, treenode.location_z)
insert_into_log(project_id, request.user.id, 'reroot_skeleton',
location, 'Rerooted skeleton for '
'treenode with ID %s' % treenode.id)
return JsonResponse({'newroot': treenode.id,
'skeleton_id': treenode.skeleton_id})
# Else, already root
raise ValueError(f'Node #{treenode_id} is already root!')
except Exception as e:
raise ValueError(response_on_error + ':' + str(e))
def _reroot_skeleton(treenode_id, project_id):
""" Returns the treenode instance that is now root,
or False if the treenode was root already. """
if treenode_id is None:
raise ValueError('A treenode id has not been provided!')
response_on_error = ''
try:
response_on_error = 'Failed to select treenode with id %s.' % treenode_id
rootnode = Treenode.objects.get(id=treenode_id, project=project_id)
# Obtain the parent of the selected treenode
first_parent = rootnode.parent_id
# If no parent found it is assumed this node is already root
if first_parent is None:
return False
q_treenode = Treenode.objects.filter(
skeleton_id=rootnode.skeleton_id,
project=project_id).values_list('id', 'parent_id', 'confidence')
nodes = {tnid: (parent_id, confidence) for (tnid, parent_id, confidence) in list(q_treenode)}
# Make sure this skeleton is not used in a sampler
samplers = Sampler.objects.prefetch_related('samplerdomain_set') \
.filter(skeleton=rootnode.skeleton)
n_samplers = len(samplers)
if n_samplers > 0:
def is_same_or_downstream(node_id, upstream_node_id):
while True:
if node_id == upstream_node_id:
return True
parent = nodes.get(node_id)
if parent:
node_id = parent[0]
else:
return False
return False
for sampler in samplers:
# Check if each sampler domain start node has a parent path to the
# new root. If it has, nothing needs to change for these
# samplers.
for domain in sampler.samplerdomain_set.all():
if not is_same_or_downstream(domain.start_node_id, rootnode.id):
response_on_error = 'Neuron is used in a sampler, which is affected by the new root'
raise ValueError(f'Skeleton {rootnode.skeleton_id} '
f'is used in {n_samplers} sampler(s), can\'t reroot')
# Traverse up the chain of parents, reversing the parent relationships so
# that the selected treenode (with ID treenode_id) becomes the root.
new_parents = []
new_parent = rootnode.id
new_confidence = rootnode.confidence
node = first_parent
response_on_error = 'An error occurred while rerooting.'
while True:
# Store current values to be used in next iteration
parent, confidence = nodes[node]
# Set new values
new_parents.append((node, new_parent, new_confidence))
if parent is None:
# Root has been reached
break
else:
# Prepare next iteration
new_parent = node
new_confidence = confidence
node = parent
# Finally make treenode root
new_parents.append((rootnode.id, 'NULL', 5)) # Reset to maximum confidence.
cursor = connection.cursor()
cursor.execute('''
UPDATE treenode
SET parent_id = v.parent_id,
confidence = v.confidence
FROM (VALUES %s) v(id, parent_id, confidence)
WHERE treenode.id = v.id
''' % ','.join(['(%s,%s,%s)' % node for node in new_parents]))
return rootnode
except Exception as e:
raise ValueError(f'{response_on_error}: {e}')
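# Worked example of the parent reversal above (IDs are made up). Given the
# chain root=A -> B -> C and rerooting at C, the loop collects
# new_parents = [(B, C, conf(C)), (A, B, conf(B)), (C, NULL, 5)], i.e. every
# edge on the path from C to the old root is flipped and C loses its parent.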
def _root_as_parent(oid):
""" Returns True if the parent group of the given element ID is the root group. """
cursor = connection.cursor()
# Try to select the parent group of the parent group;
# if none, then the parent group is the root group.
cursor.execute('''
SELECT count(*)
FROM class_instance_class_instance cici1,
class_instance_class_instance cici2,
relation r
WHERE cici1.class_instance_a = %s
AND cici1.class_instance_b = cici2.class_instance_a
AND cici1.relation_id = r.id
AND r.relation_name = 'part_of'
AND cici2.class_instance_a = cici1.class_instance_b
AND cici2.relation_id = r.id
''' % int(oid))
return 0 == cursor.fetchone()[0]
@requires_user_role(UserRole.Annotate)
def join_skeleton(request:HttpRequest, project_id=None) -> JsonResponse:
""" A user with an Annotate role can join two skeletons if the neurons
modeled by these skeletons are not locked by another user or if the current
user belongs to the group of the user who locked the neurons. A super-user
can join any skeletons.
"""
response_on_error = 'Failed to join'
try:
from_treenode_id = int(request.POST.get('from_id', None))
to_treenode_id = int(request.POST.get('to_id', None))
annotation_set = request.POST.get('annotation_set', None)
if annotation_set:
annotation_set = json.loads(annotation_set)
sampler_handling = request.POST.get('sampler_handling', None)
lose_sampler_handling = request.POST.get('lose_sampler_handling', "keep-samplers")
from_name_reference = get_request_bool(request.POST, 'from_name_reference', False)
join_info = _join_skeleton(request.user, from_treenode_id, to_treenode_id,
project_id, annotation_set, sampler_handling, lose_sampler_handling,
from_name_reference)
response_on_error = 'Could not log actions.'
return JsonResponse({
'message': 'success',
'fromid': from_treenode_id,
'toid': to_treenode_id,
'result_skeleton_id': join_info['from_skeleton_id'],
'deleted_skeleton_id': join_info['to_skeleton_id'],
'stable_annotation_swap': join_info['stable_annotation_swap'],
})
except Exception as e:
raise ValueError(response_on_error + ':' + str(e))
def make_annotation_map(annotation_vs_user_id, neuron_id, cursor=None) -> Dict:
""" Create a mapping of annotation IDs to dictionaries with 'user_id',
'edition_time' and 'creation_time' fields.
"""
cursor = cursor or connection.cursor()
annotation_map = dict()
# Update annotation-info mapping
for annotation_id, annotator_id in annotation_vs_user_id.items():
annotation_map[annotation_id] = {
'user_id': annotator_id
}
# Extend annotation maps with creation time and edition time of the link to
# neuron to make sure these dates won't change during the split.
cursor.execute('''
SELECT ci.name, MIN(cici.creation_time), MIN(cici.edition_time)
FROM class_instance ci
JOIN class_instance_class_instance cici
ON ci.id = cici.class_instance_b
WHERE cici.class_instance_a = %s
GROUP BY ci.id
''', (neuron_id,))
for row in cursor.fetchall():
entry = annotation_map.get(row[0])
if entry:
entry['creation_time'] = row[1]
entry['edition_time'] = row[2]
return annotation_map
def get_stable_partner_annotation(project_id):
"""Find a stable partner annotation if it is configured for the passed in
project. Returns the default value 'stable' if there is no such annotation
configured. A stable annotation is stored in the "settings" client data
store, in the "skeleton-annotations" key. The front-end allows admins to
configure this in the Settings Widget > Tracing section. User-settings are
ignored, and project settings dominate instance settings. This setting
defaults to 'stable'.
"""
cursor = connection.cursor()
cursor.execute("""
SELECT COALESCE((value->'entries'->'stable_join_annotation'->'value')::text, 'stable')
FROM client_datastore cds
JOIN client_data cd
ON cd.datastore_id = cds.id
WHERE cds.name = 'settings'
AND user_id IS NULL
AND cd.key = 'skeleton-annotations'
ORDER BY cd.project_id NULLS LAST
LIMIT 1
""", {
'project_id': project_id,
})
row = cursor.fetchone()
return row[0] if row else 'stable'
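# A minimal sketch (assumed layout, not verified against the front-end) of
# the 'skeleton-annotations' client_data value the query above reads:
#
#   {"entries": {"stable_join_annotation": {"value": "stable"}}}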
def _join_skeleton(user, from_treenode_id, to_treenode_id, project_id,
annotation_map, sampler_handling=None, lose_sampler_handling='keep-samplers',
from_name_reference=False) -> Dict[str, Any]:
""" Take the IDs of two nodes, each belonging to a different skeleton, and
make to_treenode be a child of from_treenode, and join the nodes of the
skeleton of to_treenode into the skeleton of from_treenode, and delete the
former skeleton of to_treenode. All annotations in annotation_set will be
linked to the skeleton of to_treenode. It is expected that <annotation_map>
is a dictionary, mapping an annotation to an annotator ID. Also, all
reviews of the skeleton that changes ID are changed to refer to the new
skeleton ID. If annotation_map is None, the resulting skeleton will have
all annotations available on both skeletons combined.
If samplers link to one or both of the input skeletons, a sampler handling
mode is required. Otherwise, the merge operation is canceled.
If from_name_reference is enabled, the target skeleton will get a new
annotation that references the merged in skeleton using its name.
"""
if from_treenode_id is None or to_treenode_id is None:
raise ValueError('Missing arguments to _join_skeleton')
response_on_error = ''
try:
from_treenode_id = int(from_treenode_id)
to_treenode_id = int(to_treenode_id)
try:
from_treenode = Treenode.objects.get(pk=from_treenode_id)
except Treenode.DoesNotExist:
raise ValueError("Could not find a skeleton for treenode #%s" % from_treenode_id)
try:
to_treenode = Treenode.objects.get(pk=to_treenode_id)
except Treenode.DoesNotExist:
raise ValueError("Could not find a skeleton for treenode #%s" % to_treenode_id)
from_skid = from_treenode.skeleton_id
from_neuron = _get_neuronname_from_skeletonid( project_id, from_skid )
to_skid = to_treenode.skeleton_id
to_neuron = _get_neuronname_from_skeletonid( project_id, to_skid )
if from_skid == to_skid:
raise ValueError('Cannot join treenodes of the same skeleton, this would introduce a loop.')
# Make sure the user has permissions to edit both neurons
can_edit_class_instance_or_fail(
user, from_neuron['neuronid'], 'neuron')
can_edit_class_instance_or_fail(
user, to_neuron['neuronid'], 'neuron')
cursor = connection.cursor()
# Test if either join partner is marked as stable. If this is the case,
# the neuron marked as stable is enforced to be the winner of the join.
# If both join partners are marked stable, the join is canceled. This
# behavior can be disabled by not passing in a stable annotation.
stable_induced_swap = False
stable_annotation = get_stable_partner_annotation(project_id)
if stable_annotation:
cursor.execute("""
SELECT array_agg(n.id)
FROM class_instance n
JOIN class_instance_class_instance cici
ON cici.class_instance_a = n.id
JOIN class_instance a
ON a.id = cici.class_instance_b
WHERE cici.relation_id = (
SELECT id FROM relation
WHERE project_id = %(project_id)s
AND relation_name = 'annotated_with'
LIMIT 1)
AND a.name = %(stable_annotation)s
AND n.project_id = %(project_id)s
AND n.id IN (%(partner_a)s, %(partner_b)s)
""", {
'project_id': project_id,
'stable_annotation': stable_annotation,
'partner_a': from_neuron['neuronid'],
'partner_b': to_neuron['neuronid'],
})
stable_result = cursor.fetchone()
stable_neuron_ids = set(stable_result[0]) if stable_result and stable_result[0] else set()
# If the from-neuron is marked as stable and the to-neuron isn't,
# everything is okay.
if to_neuron['neuronid'] in stable_neuron_ids:
if from_neuron['neuronid'] in stable_neuron_ids:
raise ValueError(f"Can't join skeletons {from_skid} and {to_skid}, because both are marked as stable.")
# Swap from and to, if to is marked as stable
original_from_treenode, original_from_treenode_id = from_treenode, from_treenode_id
original_from_skid, original_from_neuron = from_skid, from_neuron
from_skid, from_neuron = to_skid, to_neuron
from_treenode, from_treenode_id = to_treenode, to_treenode_id
to_skid, to_neuron = original_from_skid, original_from_neuron
to_treenode, to_treenode_id = original_from_treenode, original_from_treenode_id
stable_induced_swap = True
# We are going to change the skeleton ID of the "to" neuron, therefore
# all its nodes need to be locked to prevent modification from other
# transactions. To prevent a skeleton ID change of the "from" skeleton
# (which survives the merge), it is enough to lock the merge target
# node. The NOWAIT option results in an error if no lock can be
# obtained.
cursor.execute('''
SELECT 1 FROM treenode_connector tc
WHERE tc.skeleton_id = %(consumed_skeleton_id)s
ORDER BY tc.id
FOR NO KEY UPDATE OF tc NOWAIT;
SELECT 1 FROM treenode t
WHERE t.skeleton_id = %(consumed_skeleton_id)s
ORDER BY t.id
FOR NO KEY UPDATE OF t NOWAIT;
SELECT 1 FROM treenode t
WHERE t.id = %(target_node_id)s
ORDER BY t.id
FOR NO KEY UPDATE OF t NOWAIT;
''', {
'consumed_skeleton_id': to_skid,
'target_node_id': from_treenode_id,
})
# Check if annotations are valid, if there is a particular selection
if annotation_map is None:
# Get all current annotations of both skeletons and merge them for
# a complete result set.
from_annotation_info = get_annotation_info(project_id, (from_skid,),
annotations=True, metaannotations=False, neuronnames=False)
to_annotation_info = get_annotation_info(project_id, (to_skid,),
annotations=True, metaannotations=False, neuronnames=False)
# Create a new annotation map with the expected structure of
# 'annotationname' vs. 'annotator id'.
def merge_into_annotation_map(source, skid, target):
skeletons = source['skeletons']
if skeletons and skid in skeletons:
for a in skeletons[skid]['annotations']:
annotation = source['annotations'][a['id']]
target[annotation] = a['uid']
# Merge from after to, so that it overrides entries from the merged
# in skeleton.
annotation_map = dict()
merge_into_annotation_map(to_annotation_info, to_skid, annotation_map)
merge_into_annotation_map(from_annotation_info, from_skid, annotation_map)
else:
if not check_annotations_on_join(project_id, user,
from_neuron['neuronid'], to_neuron['neuronid'],
frozenset(annotation_map.keys())):
raise ValueError("Annotation distribution is not valid for joining. " \
"Annotations for which you don't have permissions have to be kept!")
# Find oldest creation_time and edition_time
winning_map = make_annotation_map(annotation_map, from_neuron['neuronid'])
losing_map = make_annotation_map(annotation_map, to_neuron['neuronid'])
for k,v in losing_map.items():
winning_entry = winning_map.get(k)
if winning_entry:
for field in ('creation_time', 'edition_time'):
losing_time = v.get(field)
winning_time = winning_entry.get(field)
if losing_time and winning_time:
winning_entry[field] = min(winning_time, losing_time)
if from_name_reference:
name_ref = 'Merged: ' + to_neuron['neuronname']
if name_ref not in winning_map:
winning_map[name_ref] = {
'user_id': user.id
}
# Reroot to_skid at to_treenode if necessary
response_on_error = 'Could not reroot at treenode %s' % to_treenode_id
_reroot_skeleton(to_treenode_id, project_id)
# If samplers reference this skeleton, make sure they are updated as well
sampler_info = _update_samplers_in_merge(project_id, user.id, from_skid, to_skid,
from_treenode.id, to_treenode.id, sampler_handling,
lose_sampler_handling)
# The target skeleton is removed and its treenode assumes
# the skeleton id of the from-skeleton.
response_on_error = 'Could not update Treenode table with new skeleton id for joined treenodes.'
Treenode.objects.filter(skeleton=to_skid).update(skeleton=from_skid)
cursor.execute("""
-- Set transaction user ID to update skeleton summary more precisely in trigger function.
SET LOCAL catmaid.user_id=%(user_id)s;
UPDATE treenode
SET skeleton_id = %(from_skeleton_id)s
WHERE skeleton_id = %(to_skeleton_id)s
""", {
'user_id': user.id,
'from_skeleton_id': from_skid,
'to_skeleton_id': to_skid,
})
response_on_error = 'Could not update TreenodeConnector table.'
TreenodeConnector.objects.filter(
skeleton=to_skid).update(skeleton=from_skid)
# Update reviews from 'losing' neuron to now belong to the new neuron
response_on_error = 'Could not update reviews with new skeleton IDs for joined treenodes.'
Review.objects.filter(skeleton_id=to_skid).update(skeleton=from_skid)
# Remove skeleton of to_id (deletes the cici part_of link to the neuron by cascade,
# leaving the parent neuron dangling in the object tree).
response_on_error = 'Could not delete skeleton with ID %s.' % to_skid
ClassInstance.objects.filter(pk=to_skid).delete()
# Update the parent of to_treenode.
response_on_error = 'Could not update parent of treenode with ID %s' % to_treenode_id
Treenode.objects.filter(id=to_treenode_id).update(parent=from_treenode_id, editor=user)
# Update linked annotations of neuron
response_on_error = 'Could not update annotations of neuron ' \
'with ID %s' % from_neuron['neuronid']
_update_neuron_annotations(project_id, from_neuron['neuronid'],
winning_map, to_neuron['neuronid'])
# Remove the 'losing' neuron if it is empty
_delete_if_empty(to_neuron['neuronid'])
from_location = (from_treenode.location_x, from_treenode.location_y,
from_treenode.location_z)
swap_info = ', partners swapped due to stable annotation' if stable_induced_swap else ''
insert_into_log(project_id, user.id, 'join_skeleton',
from_location, 'Joined skeleton with ID %s (neuron: ' \
'%s) into skeleton with ID %s (neuron: %s, annotations: %s)%s' % \
(to_skid, to_neuron['neuronname'], from_skid,
from_neuron['neuronname'], ', '.join(winning_map.keys()),
swap_info))
response = {
'from_skeleton_id': from_skid,
'to_skeleton_id': to_skid,
'stable_annotation_swap': stable_induced_swap,
}
if sampler_info and sampler_info['n_samplers'] > 0:
response['samplers'] = {
'n_deleted_intervals': sampler_info['n_deleted_intervals'],
'n_deleted_domains': sampler_info['n_deleted_domains'],
'n_added_domains': sampler_info['n_added_domains'],
'n_added_intervals': sampler_info['n_added_intervals'],
}
return response
except Exception as e:
raise ValueError(response_on_error + ':' + str(e))
def _update_samplers_in_merge(project_id, user_id, win_skeleton_id, lose_skeleton_id,
win_treenode_id, lose_treenode_id, win_sampler_handling,
lose_sampler_handling='keep-samplers') -> Optional[Dict[str, Any]]:
"""Update the sampler configuration for the passed in skeletons under the
assumption that this is part of a merge operation.
"""
samplers = Sampler.objects.prefetch_related('samplerdomain_set',
'samplerdomain_set__samplerdomainend_set',
'samplerdomain_set__samplerinterval_set').filter(
skeleton_id__in=[win_skeleton_id, lose_skeleton_id])
n_samplers = len(samplers)
# If there are no samplers linked, return early
if not n_samplers:
return None
sampler_index:DefaultDict[Any, List] = defaultdict(list)
for s in samplers:
sampler_index[s.skeleton_id].append(s)
known_win_sampler_handling_modes = ("create-intervals", "branch",
"domain-end", "new-domain")
known_lose_sampler_handling_modes = ("delete-samplers", "keep-samplers")
if win_sampler_handling not in known_win_sampler_handling_modes:
raise ValueError("Samplers in use on skeletons. Unknown "
f"(winning) sampler handling mode: {win_sampler_handling}")
if lose_sampler_handling not in known_lose_sampler_handling_modes:
raise ValueError("Samplers in use on skeletons. Unknown "
f"(losing) sampler handling mode: {lose_sampler_handling}")
n_deleted_intervals = 0
n_deleted_domains = 0
n_deleted_samplers = 0
n_added_intervals = 0
n_added_domains = 0
n_added_domain_ends = 0
# If there are samplers linked to the losing skeleton, delete them if
# allowed or complain otherwise.
n_samplers_lose = len(sampler_index[lose_skeleton_id])
if n_samplers_lose:
if lose_sampler_handling == "delete-samplers":
# Delete samplers that link to losing skeleton
n_deleted, _ = Sampler.objects.filter(project_id=project_id,
skeleton_id=lose_skeleton_id).delete()
n_deleted_samplers += n_deleted
elif lose_sampler_handling == "keep-samplers":
# Keep the samplers of the losing skeleton by re-pointing them to
# the winning skeleton.
samplers = Sampler.objects.filter(project_id=project_id,
skeleton_id=lose_skeleton_id).update(skeleton_id=win_skeleton_id)
else:
raise ValueError("The losing merge skeleton is referenced " +
f"by {n_samplers_lose} sampler(s), merge aborted.")
# Construct a networkx graph for the winning skeleton
cursor = connection.cursor()
cursor.execute('''
SELECT t.id, t.parent_id FROM treenode t WHERE t.skeleton_id = %s
ORDER BY t.id
''', [win_skeleton_id])
# build the networkx graph from it
graph = nx.DiGraph()
for row in cursor.fetchall():
graph.add_node(row[0])
if row[1]:
# edge from parent_id to id
graph.add_edge(row[1], row[0])
cursor.execute('''
SELECT t.id, t.parent_id FROM treenode t WHERE t.skeleton_id = %s
ORDER BY t.id
''', [lose_skeleton_id]) # no need to sanitize
# build the networkx graph from it
lose_graph = nx.DiGraph()
for row in cursor.fetchall():
lose_graph.add_node(row[0])
if row[1]:
# edge from parent_id to id
lose_graph.add_edge(row[1], row[0])
lose_graph_end_nodes = [x for x in lose_graph.nodes_iter()
if lose_graph.out_degree(x)==0 and lose_graph.in_degree(x)==1]
regular_domain_type = SamplerDomainType.objects.get(name='regular')
# Update each sampler
n_samplers_win = len(sampler_index[win_skeleton_id])
for sampler in sampler_index[win_skeleton_id]:
# Each sampler references the skeleton through domains and
# intervals. Action is only required if the merge is performed into an
# existing domain. Therefore, iterate over domains and check if the
# merge point is in them.
# TODO What happens when merge is into interval, but into a newly traced
# branch on that interval.
matching_domains = []
for domain in sampler.samplerdomain_set.all():
domain_ends = domain.samplerdomainend_set.all()
domain_end_map = dict(map(lambda de: (de.end_node_id, de.id), domain_ends))
domain_end_ids = set(domain_end_map.keys())
# Construct a graph for the domain and split it too.
domain_graph = nx.DiGraph()
create_subgraph(graph, domain_graph, domain.start_node_id, domain_end_ids)
# If the subgraph is empty, this domain doesn't intersect with
# the split off part. Therefore, this domain needs no update.
if domain_graph.size() == 0:
continue
if domain_graph.has_node(win_treenode_id):
matching_domains.append({
'domain': domain,
'graph': domain_graph,
})
if len(matching_domains) > 1:
raise ValueError("The merge point is part of multiple sampler "
"domains in the same sampler, please pick one of the "
"adjacent points.")
# If the merge point is not part of any domain in this sampler,
# continue. No update is needed here.
if len(matching_domains) == 0:
continue
# We expect a single domain at the moment
domain_info = matching_domains[0]
domain = domain_info['domain']
domain_graph = domain_info['graph']
# Figure out some basic properties about the node
is_domain_start = win_treenode_id == domain.start_node_id
is_domain_end = win_treenode_id in domain_end_ids
# Check if the winning merge treenode is the start of an interval in
# this sampler.
cursor.execute("""
SELECT id
FROM catmaid_samplerinterval
WHERE project_id = %(project_id)s
AND domain_id= %(domain_id)s
AND (start_node_id = %(treenode_id)s
OR end_node_id = %(treenode_id)s)
""", {
'domain_id': domain.id,
'project_id': project_id,
'treenode_id': win_treenode_id,
})
start_end_intervals = cursor.fetchall()
is_interval_start_or_end = len(start_end_intervals) > 0
# is_in_interval =
# is_in_traced_out_part = not is_domain_end
# For each domain in this sampler in which the winning merging treenode
# is contained, we need to update the domain ends.
new_domain_ends = []
if win_sampler_handling == "create-intervals":
raise ValueError("Extending an existing sampler domain using a "
"merge is not yet supported")
elif win_sampler_handling == "branch":
# Nothing needs to be done here if the winning merge node is not an
# interval start or end. If it is, an error is raised in this mode,
# because we don't treat interval start/end branches as part of the
# interval.
if is_interval_start_or_end:
raise ValueError("Please merge into an adjacent node, because " + \
f"the current target ({win_treenode_id}) is a start or end of an interval")
else:
# It doesn't matter whether this fragment is merged into an
# interval or not.
pass
elif win_sampler_handling == "domain-end" or \
win_sampler_handling == "new-domain":
if is_domain_start:
# If we merge into a domain start and want to keep the domain
# integrity, we need to add a new end at the losing treenode.
new_domain_ends.append(lose_treenode_id)
elif is_domain_end:
# If we merge into a domain end and want to keep this the end,
# nothing has to be done. Regardless of whether it is a leaf or
# not.
pass
# elif is_in_interval:
# new_domain_ends.append(lose_treenode_id)
#
# if is_in_traced_out_part:
# A traced out fragment isn't part of the initial interval,
# but has been added while tracing out the interval. To
# maintain this as a part of this domain, we need to add
# regular intervals on this branch (starting from the last
# regular interval node and add the losing treenode as
# domain end.
# TODO
# new_domain_ends.append(lose_treenode_id)
else:
# If we merge into the domain, but not into an interval, make
# sure the domain isn't extended here by adding a new domain end
# at the merged-in node.
new_domain_ends.append(lose_treenode_id)
if win_sampler_handling == "new-domain":
# Add new domain
new_domain = SamplerDomain.objects.create(project_id=project_id,
user_id=user_id, sampler=sampler, start_node_id=lose_treenode_id,
domain_type=regular_domain_type)
n_added_domains += 1
for leaf in lose_graph_end_nodes:
SamplerDomainEnd.objects.create(domain=new_domain,
end_node_id=leaf)
n_added_domain_ends += 1
# Add new domain ends
for end_node in new_domain_ends:
SamplerDomainEnd.objects.create(domain=domain, end_node_id=end_node)
n_added_domain_ends += 1
return {
'n_samplers': n_samplers_win + n_samplers_lose,
'n_samplers_win': n_samplers_win,
'n_samplers_lose': n_samplers_lose,
'n_deleted_intervals': n_deleted_intervals,
'n_deleted_domains': n_deleted_domains,
'n_deleted_samplers': n_deleted_samplers,
'n_added_intervals': n_added_intervals,
'n_added_domains': n_added_domains,
'n_added_domain_ends': n_added_domain_ends,
}
@api_view(['POST'])
@requires_user_role(UserRole.Import)
def import_skeleton(request:HttpRequest, project_id=None) -> Union[HttpResponse, HttpResponseBadRequest]:
"""Import a neuron modeled by a skeleton from an uploaded file.
Currently only SWC and eSWC representations are supported.
---
consumes: multipart/form-data
parameters:
- name: neuron_id
description: >
If specified a request for a particular neuron ID is expressed. If
force = true, this request is enforced and the existing neuron ID
(and all its skeletons) is replaced (as long as they are in the
target project). If force = false (default), the neuron ID is only
used if available and a new one is generated otherwise.
paramType: form
type: integer
- name: skeleton_id
description: >
If specified a request for a particular skeleton ID is expressed. If
force = true, this request is enforced and the existing skeleton ID
(and all its neurons) is replaced (as long as they are in the target
project). If force = false (default), the skeleton ID is only used
if available and a new one is generated otherwise.
paramType: form
type: integer
- name: force
description: >
If neuron_id or skeleton_id are passed in, existing neuron/skeleton
instances in this project are replaced. All their respectively
linked skeletons and neurons will be removed.
type: boolean
required: false
defaultValue: false
paramType: form
- name: auto_id
description: >
If a passed in neuron ID or skeleton ID is already in use, a new ID
will be selected automatically (default). If auto_id is set to false,
an error is raised in this situation.
type: boolean
required: false
defaultValue: true
paramType: form
- name: name
description: >
If specified, the name of a new neuron will be set to this.
paramType: form
type: string
- name: annotations
description: >
An optional list of annotation names that is added to the imported
skeleton.
paramType: form
type: array
items:
type: string
- name: source_id
description: >
If specified, this source ID will be saved and mapped to the new
skeleton ID.
paramType: form
type: integer
- name: source_project_id
description: >
If specified, this source project ID will be saved and mapped to the
new skeleton ID. This is only valid together with source_id and
source_url.
paramType: form
type: integer
- name: source_url
description: >
If specified, this source URL will be saved and mapped to the new
skeleton ID.
paramType: form
type: string
- name: source_type
description: >
Can be either 'skeleton' or 'segmentation', to further specify of
what type the origin data is.
paramType: form
type: string
- name: file
required: true
description: A skeleton representation file to import.
paramType: body
dataType: File
type:
neuron_id:
type: integer
required: true
description: ID of the neuron used or created.
skeleton_id:
type: integer
required: true
description: ID of the imported skeleton.
node_id_map:
required: true
description: >
An object whose properties are node IDs in the import file and
whose values are IDs of the created nodes.
"""
project_id = int(project_id)
neuron_id = request.POST.get('neuron_id', None)
if neuron_id is not None:
neuron_id = int(neuron_id)
skeleton_id = request.POST.get('skeleton_id', None)
if skeleton_id is not None:
skeleton_id = int(skeleton_id)
force = get_request_bool(request.POST, 'force', False)
auto_id = get_request_bool(request.POST, 'auto_id', True)
name = request.POST.get('name', None)
annotations = get_request_list(request.POST, 'annotations', ['Import'])
source_id = request.POST.get('source_id', None)
source_url = request.POST.get('source_url', None)
source_project_id = request.POST.get('source_project_id', None)
source_type = request.POST.get('source_type', 'skeleton')
if len(request.FILES) == 1:
for uploadedfile in request.FILES.values():
if uploadedfile.size > settings.IMPORTED_SKELETON_FILE_MAXIMUM_SIZE:
return HttpResponse(f'File too large. Maximum file size is {settings.IMPORTED_SKELETON_FILE_MAXIMUM_SIZE} bytes.', status=413)
filename = uploadedfile.name
extension = filename.split('.')[-1].strip().lower()
if extension == 'swc':
swc_string = '\n'.join([line.decode('utf-8') for line in uploadedfile])
return import_skeleton_swc(request.user, project_id, swc_string,
neuron_id, skeleton_id, name, annotations, force,
auto_id, source_id, source_url, source_project_id,
source_type)
if extension == 'eswc':
swc_string = '\n'.join([line.decode('utf-8') for line in uploadedfile])
return import_skeleton_eswc(request.user, project_id, swc_string,
neuron_id, skeleton_id, name, annotations, force,
auto_id, source_id, source_url, source_project_id,
source_type)
else:
return HttpResponse(f'File type "{extension}" not understood. Known file types: swc, eswc', status=415)
return HttpResponseBadRequest('No file received.')
def import_skeleton_swc(user, project_id, swc_string, neuron_id=None,
skeleton_id=None, name=None, annotations=['Import'], force=False,
auto_id=True, source_id=None, source_url=None, source_project_id=None,
source_type='skeleton') -> JsonResponse:
"""Import a neuron modeled by a skeleton in SWC format.
"""
g = nx.DiGraph()
for line in swc_string.splitlines():
if line.startswith('#') or not line.strip():
continue
row = line.strip().split()
if len(row) != 7:
raise ValueError(f'SWC has a malformed line: {line}')
node_id = int(row[0])
parent_id = int(row[6])
g.add_node(node_id, {'x': float(row[2]),
'y': float(row[3]),
'z': float(row[4]),
'radius': float(row[5])})
if parent_id != -1:
g.add_edge(parent_id, node_id)
if not nx.is_directed_acyclic_graph(g):
raise ValueError('SWC skeleton is malformed: it contains a cycle.')
import_info = _import_skeleton(user, project_id, g, neuron_id, skeleton_id,
name, annotations, force, auto_id, source_id, source_url,
source_project_id, source_type)
node_id_map = {n: d['id'] for n, d in import_info['graph'].nodes_iter(data=True)}
return JsonResponse({
'neuron_id': import_info['neuron_id'],
'skeleton_id': import_info['skeleton_id'],
'node_id_map': node_id_map,
})
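# A minimal SWC fragment as parsed above (coordinates are made up); columns
# are id, type, x, y, z, radius, parent_id, with -1 marking the root:
#
#   1 0 100.0 200.0 30.0 2.5 -1
#   2 0 110.0 205.0 30.0 2.0 1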
def import_skeleton_eswc(user, project_id, swc_string, neuron_id=None,
skeleton_id=None, name=None, annotations=['Import'], force=False,
auto_id=True, source_id=None, source_url=None, source_project_id=None,
source_type='skeleton') -> JsonResponse:
"""Import a neuron modeled by a skeleton in eSWC format.
"""
user_map = dict(User.objects.all().values_list('username', 'id'))
parse_time = dateutil.parser.parse
g = nx.DiGraph()
for line in swc_string.splitlines():
if line.startswith('#') or not line.strip():
continue
row = line.strip().split()
if len(row) != 12:
raise ValueError(f'eSWC has a malformed line ({len(row)} instead of 12 columns): {line}')
if row[7] not in user_map:
# Create deactivated user with this username
new_creator = User.objects.create(username=row[7], is_active=False)
user_map[row[7]] = new_creator.id
if row[9] not in user_map:
# Create deactivated user with this username
new_editor = User.objects.create(username=row[9], is_active=False)
user_map[row[9]] = new_editor.id
node_id = int(row[0])
parent_id = int(row[6])
g.add_node(node_id, {
'x': float(row[2]),
'y': float(row[3]),
'z': float(row[4]),
'radius': float(row[5]),
'user_id': user_map[row[7]],
'creation_time': parse_time(row[8]),
'editor_id': user_map[row[9]],
'edition_time': parse_time(row[10]),
'confidence': int(row[11]),
})
if parent_id != -1:
g.add_edge(parent_id, node_id)
if not nx.is_directed_acyclic_graph(g):
raise ValueError('eSWC skeleton is malformed: it contains a cycle.')
import_info = _import_skeleton(user, project_id, g, neuron_id, skeleton_id,
name, annotations, force, auto_id, source_id, source_url,
source_project_id, source_type, extended_data=True)
node_id_map = {n: d['id'] for n, d in import_info['graph'].nodes_iter(data=True)}
return JsonResponse({
'neuron_id': import_info['neuron_id'],
'skeleton_id': import_info['skeleton_id'],
'node_id_map': node_id_map,
})
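# A minimal eSWC line as parsed above (values are made up); the 12
# whitespace-separated columns are id, type, x, y, z, radius, parent_id,
# creator, creation_time, editor, edition_time, confidence:
#
#   2 0 110.0 205.0 30.0 2.0 1 alice 2020-01-02T12:00:00 bob 2020-01-03T08:30:00 5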
def _import_skeleton(user, project_id, arborescence, neuron_id=None,
skeleton_id=None, name=None, annotations=['Import'], force=False,
auto_id=True, source_id=None, source_url=None, source_project_id=None,
source_type='skeleton', extended_data=False, map_available_users=True) -> Dict[str, Any]:
"""Create a skeleton from a networkx directed tree.
Associate the skeleton to the specified neuron, or a new one if none is
provided. Returns a dictionary of the neuron and skeleton IDs, and the
original arborescence with attributes added for treenode IDs.
"""
# TODO: There is significant reuse here of code from create_treenode that
# could be DRYed up.
relation_map = get_relation_to_id_map(project_id)
class_map = get_class_to_id_map(project_id)
new_neuron = None
if neuron_id is not None:
# Check that the neuron to use exists
try:
existing_neuron = ClassInstance.objects.select_related('class_column').get(pk=neuron_id)
if force:
if existing_neuron.project_id != project_id:
raise ValueError("Target neuron exists, but is part of other project")
if existing_neuron.class_column.class_name != 'neuron':
raise ValueError(f"Existing object with ID {existing_neuron.id} is not a neuron, " + \
f"but is marked as {existing_neuron.class_column.class_name}")
# Remove all data linked to this neuron, including skeletons
cici = ClassInstanceClassInstance.objects.filter(
class_instance_b=neuron_id,
relation_id=relation_map['model_of'],
class_instance_a__class_column_id=class_map['skeleton'])
# Raise an Exception if the user doesn't have permission to
# edit the existing neuron.
can_edit_class_instance_or_fail(user, neuron_id, 'neuron')
# Raise an Exception if the user doesn't have permission to
# edit the existing skeleton.
for skeleton_link in cici:
old_skeleton = skeleton_link.class_instance_a
can_edit_class_instance_or_fail(user, old_skeleton.id, 'class_instance')
# Require users to have edit permission on all treenodes of the
# skeleton.
treenodes = Treenode.objects.filter(skeleton_id=old_skeleton.id,
project_id=project_id)
treenode_ids = treenodes.values_list('id', flat=True)
can_edit_all_or_fail(user, treenode_ids, 'treenode')
# Remove existing skeletons
skeleton_link.delete()
old_skeleton.delete()
treenodes.delete()
new_neuron = existing_neuron
elif auto_id:
# The neuron ID exists already, and with force=False no data
# will be replaced.
neuron_id = None
else:
raise ValueError("The passed in neuron ID is already in use and "
"neither of the parameters force or auto_id are set to true.")
except ClassInstance.DoesNotExist:
# The neuron ID is okay to use
pass
new_skeleton = None
if skeleton_id is not None:
# Check that the skeleton to use exists
try:
existing_skeleton = ClassInstance.objects.get(pk=skeleton_id)
if force:
if existing_skeleton.project_id != project_id:
raise ValueError("Target skeleton exists, but is part of other project")
if existing_skeleton.class_column.class_name != 'skeleton':
raise ValueError(f"Existing object with ID {existing_skeleton.id} is not a skeleton, " + \
f"but marked as {existing_skeleton.class_column.class_name}")
# Remove all data linked to this neuron, including skeletons
cici = ClassInstanceClassInstance.objects.filter(
class_instance_a=skeleton_id,
relation_id=relation_map['model_of'],
class_instance_b__class_column_id=class_map['neuron'])
# Raise an Exception if the user doesn't have permission to
# edit the existing skeleton.
can_edit_class_instance_or_fail(user, skeleton_id, 'skeleton')
# Require users to have edit permission on all treenodes of the
# skeleton.
treenodes = Treenode.objects.filter(skeleton_id=skeleton_id,
project_id=project_id)
treenode_ids = treenodes.values_list('id', flat=True)
# Raise an Exception if the user doesn't have permission to
# edit the existing treenodes.
can_edit_all_or_fail(user, treenode_ids, 'treenode')
for link in cici:
old_neuron = link.class_instance_b
can_edit_class_instance_or_fail(user, old_neuron.id, 'class_instance')
# Remove existing skeletons
link.delete()
old_neuron.delete()
treenodes.delete()
new_skeleton = existing_skeleton
elif auto_id:
# The skeleton ID exists already, and with force=False no data
# will be replaced.
skeleton_id = None
else:
raise ValueError("The passed in skeleton ID is already in use and "
"neither of the parameters force or auto_id are set to true.")
except ClassInstance.DoesNotExist:
# The skeleton ID is okay to use
pass
if not new_skeleton:
new_skeleton = ClassInstance()
new_skeleton.id = skeleton_id
new_skeleton.user = user
new_skeleton.project_id = project_id
new_skeleton.class_column_id = class_map['skeleton']
if name is not None:
new_skeleton.name = name
else:
new_skeleton.name = 'skeleton'
new_skeleton.save()
new_skeleton.name = 'skeleton %d' % new_skeleton.id
new_skeleton.save()
skeleton_id = new_skeleton.id
def relate_neuron_to_skeleton(neuron, skeleton):
return _create_relation(user, project_id,
relation_map['model_of'], skeleton, neuron)
if not new_neuron:
new_neuron = ClassInstance()
new_neuron.id = neuron_id
new_neuron.user = user
new_neuron.project_id = project_id
new_neuron.class_column_id = class_map['neuron']
if name is not None:
new_neuron.name = name
else:
new_neuron.name = 'neuron'
new_neuron.save()
new_neuron.name = 'neuron %d' % new_neuron.id
new_neuron.save()
neuron_id = new_neuron.id
has_new_neuron_id = new_neuron.id == neuron_id
has_new_skeleton_id = new_skeleton.id == skeleton_id
relate_neuron_to_skeleton(neuron_id, new_skeleton.id)
# Add annotations, if that is requested
if annotations:
annotation_map = {a:{'user_id': user.id} for a in annotations}
_annotate_entities(project_id, [new_neuron.id], annotation_map)
# For pathological networks this can error, so do it before inserting
# treenodes.
root = find_root(arborescence)
if root is None:
raise ValueError('No root, provided graph is malformed!')
# Bulk create the required number of treenodes. This must be done in two
# steps because treenode IDs are not known.
cursor = connection.cursor()
cursor.execute("""
INSERT INTO treenode (project_id, location_x, location_y, location_z,
editor_id, user_id, skeleton_id)
SELECT t.project_id, t.x, t.x, t.x, t.user_id, t.user_id, t.skeleton_id
FROM (VALUES (%(project_id)s, 0, %(user_id)s, %(skeleton_id)s))
AS t (project_id, x, user_id, skeleton_id),
generate_series(1, %(num_treenodes)s)
RETURNING treenode.id
""", {
'project_id': int(project_id),
'user_id': user.id,
'skeleton_id': new_skeleton.id,
'num_treenodes': arborescence.number_of_nodes()
})
treenode_ids = cursor.fetchall()
# Flatten IDs
treenode_ids = list(chain.from_iterable(treenode_ids))
nx.set_node_attributes(arborescence, 'id', dict(zip(arborescence.nodes(), treenode_ids)))
# Set parent node ID
for n, nbrs in arborescence.adjacency_iter():
for nbr in nbrs:
# FIXME: the cast here and below in the SQL (::bigint) shouldn't be
# needed
arborescence.node[nbr]['parent_id'] = int(arborescence.node[n]['id'])
if 'radius' not in arborescence.node[nbr]:
arborescence.node[nbr]['radius'] = -1
arborescence.node[root]['parent_id'] = None
if 'radius' not in arborescence.node[root]:
arborescence.node[root]['radius'] = -1
new_location = tuple([arborescence.node[root][k] for k in ('x', 'y', 'z')])
if extended_data:
treenode_template = '(' + '),('.join(
'%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' for _ in arborescence.nodes_iter()
) + ')'
treenode_values = list(chain.from_iterable(
[
d['id'], d['x'], d['y'], d['z'],
d['parent_id'], d['radius'], d['user_id'],
d['creation_time'], d['editor_id'],
d['edition_time'], d['confidence']
] for n, d in arborescence.nodes_iter(data=True)
))
# Include skeleton ID for index performance.
cursor.execute(f"""
UPDATE treenode SET
location_x = v.x,
location_y = v.y,
location_z = v.z,
parent_id = v.parent_id::bigint,
radius = v.radius,
user_id = v.user_id,
creation_time = v.creation_time,
editor_id = v.editor_id,
edition_time = v.edition_time,
confidence = v.confidence
FROM (VALUES {treenode_template}) AS v(id, x, y, z, parent_id,
radius, user_id, creation_time, editor_id, edition_time,
confidence)
WHERE treenode.id = v.id
AND treenode.skeleton_id = %s
""", treenode_values + [new_skeleton.id])
else:
treenode_template = '(' + '),('.join('%s,%s,%s,%s,%s,%s' for _ in arborescence.nodes_iter()) + ')'
treenode_values = list(chain.from_iterable([d['id'], d['x'], d['y'], d['z'], d['parent_id'], d['radius']] \
for n, d in arborescence.nodes_iter(data=True)))
# Include skeleton ID for index performance.
cursor.execute(f"""
UPDATE treenode SET
location_x = v.x,
location_y = v.y,
location_z = v.z,
parent_id = v.parent_id,
radius = v.radius
FROM (VALUES {treenode_template}) AS v(id, x, y, z, parent_id, radius)
WHERE treenode.id = v.id
AND treenode.skeleton_id = %s
""", treenode_values + [new_skeleton.id])
# Log import.
annotation_info = f' {", ".join(annotations)}' if annotations else ''
insert_into_log(project_id, user.id, 'create_neuron',
new_location, f'Create neuron {new_neuron.id} and skeleton '
f'{new_skeleton.id} via import.{annotation_info}')
# Store reference to source ID and source URL, if provided.
if source_url and source_project_id:
data_source = get_data_source(project_id, source_url, source_project_id, user.id)
skeleton_origin = SkeletonOrigin.objects.create(project_id=project_id,
user_id=user.id, data_source=data_source,
skeleton_id=new_skeleton.id, source_id=source_id,
source_type=source_type)
if neuron_id or skeleton_id:
# Reset ID sequence if IDs have been passed in.
cursor.execute("""
SELECT setval('concept_id_seq', coalesce(max("id"), 1), max("id") IS NOT null)
FROM concept;
SELECT setval('location_id_seq', coalesce(max("id"), 1), max("id") IS NOT null)
FROM location;
""")
return {
'neuron_id': neuron_id,
'skeleton_id': new_skeleton.id,
'graph': arborescence,
'has_new_neuron_id': has_new_neuron_id,
'has_new_skeleton_id': has_new_skeleton_id,
}
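# Illustrative usage sketch (comment only; the variable name `result` and the
# logging call are assumptions, not taken from this module):
#
#   result = <call to the import helper above>
#   if result['has_new_neuron_id'] or result['has_new_skeleton_id']:
#       logger.info('created neuron %s with skeleton %s',
#                   result['neuron_id'], result['skeleton_id'])
#   n_nodes = result['graph'].number_of_nodes()  # networkx graph of treenodes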
@requires_user_role(UserRole.Annotate)
def reset_own_reviewer_ids(request:HttpRequest, project_id=None, skeleton_id=None) -> JsonResponse:
""" Remove all reviews done by the requsting user in the skeleten with ID
<skeleton_id>.
"""
skeleton_id = int(skeleton_id) # sanitize
Review.objects.filter(skeleton_id=skeleton_id, reviewer=request.user).delete()
insert_into_log(project_id, request.user.id, 'reset_reviews',
None, 'Reset reviews for skeleton %s' % skeleton_id)
return JsonResponse({'status': 'success'})
@requires_user_role(UserRole.Browse)
def annotation_list(request:HttpRequest, project_id=None) -> JsonResponse:
""" Returns a JSON serialized object that contains information about the
given skeletons.
"""
skeleton_ids = tuple(int(v) for k,v in request.POST.items()
if k.startswith('skeleton_ids['))
annotations = bool(int(request.POST.get("annotations", 0)))
metaannotations = bool(int(request.POST.get("metaannotations", 0)))
neuronnames = bool(int(request.POST.get("neuronnames", 0)))
ignore_invalid = get_request_bool(request.POST, "ignore_invalid", False)
response = get_annotation_info(project_id, skeleton_ids, annotations,
metaannotations, neuronnames, ignore_invalid)
return JsonResponse(response)
def get_annotation_info(project_id, skeleton_ids, annotations, metaannotations,
neuronnames, ignore_invalid=False) -> Dict[str, Any]:
if not skeleton_ids:
raise ValueError("No skeleton IDs provided")
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
cursor = connection.cursor()
# Create a map of skeleton IDs to neuron IDs
cursor.execute("""
SELECT cici.class_instance_a, cici.class_instance_b
FROM class_instance_class_instance cici
WHERE cici.project_id = %(project_id)s AND
cici.relation_id = %(model_of)s AND
cici.class_instance_a = ANY(%(skeleton_ids)s::bigint[])
""", {
'project_id': project_id,
'model_of': relations['model_of'],
'skeleton_ids': list(skeleton_ids),
})
n_to_sk_ids = {n:s for s,n in cursor.fetchall()}
neuron_ids = list(n_to_sk_ids.keys())
if not neuron_ids:
raise Http404('No skeleton or neuron found')
# Query for annotations of the given skeletons, specifically
# neuron_id, auid, aid and aname.
cursor.execute("""
SELECT cici.class_instance_a AS neuron_id, cici.user_id AS auid,
cici.class_instance_b AS aid, ci.name AS aname
FROM class_instance_class_instance cici INNER JOIN
class_instance ci ON cici.class_instance_b = ci.id
WHERE cici.relation_id = %(annotated_with)s
AND cici.class_instance_a = ANY(%(neuron_ids)s::bigint[])
AND ci.class_id = %(annotation)s
""", {
'annotated_with': relations['annotated_with'],
'neuron_ids': neuron_ids,
'annotation': classes['annotation'],
})
# Build result dictionaries: one that maps annotation IDs to annotation
# names and another one that lists annotation IDs and annotator IDs for
# each skeleton ID.
annotations = {}
skeletons:Dict = {}
for row in cursor.fetchall():
skid, auid, aid, aname = n_to_sk_ids[row[0]], row[1], row[2], row[3]
if aid not in annotations:
annotations[aid] = aname
skeleton = skeletons.get(skid)
if not skeleton:
skeleton = {'annotations': []}
skeletons[skid] = skeleton
skeleton['annotations'].append({
'uid': auid,
'id': aid,
})
# Assemble response
response = {
'annotations': annotations,
'skeletons': skeletons,
}
# If wanted, get the neuron name of each skeleton
if neuronnames:
cursor.execute("""
SELECT ci.id, ci.name
FROM class_instance ci
WHERE ci.id = ANY(%(neuron_ids)s::bigint[])
""", {
'neuron_ids': neuron_ids,
})
response['neuronnames'] = {n_to_sk_ids[n]:name for n,name in cursor.fetchall()}
# If wanted, get the meta annotations for each annotation
if metaannotations and len(annotations):
# Request only ID of annotated annotations, annotator ID, meta
# annotation ID, meta annotation Name
cursor.execute("""
SELECT cici.class_instance_a AS aid, cici.user_id AS auid,
cici.class_instance_b AS maid, ci.name AS maname
FROM class_instance_class_instance cici
INNER JOIN class_instance ci
ON cici.class_instance_b = ci.id
WHERE cici.project_id = %(project_id)s
AND cici.relation_id = %(annotated_with)s
AND cici.class_instance_a = ANY (%(annotation_ids)s::bigint[])
AND ci.class_id = %(annotation)s
""", {
'project_id': project_id,
'annotated_with': relations['annotated_with'],
'annotation_ids': list(annotations.keys()),
'annotation': classes['annotation'],
})
# Add this to the response
metaannotations = {}
for row in cursor.fetchall():
aaid, auid, maid, maname = row[0], row[1], row[2], row[3]
if maid not in annotations:
annotations[maid] = maname
annotation = metaannotations.get(aaid)
if not annotation:
annotation = {'annotations': []}
metaannotations[aaid] = annotation
annotation['annotations'].append({
'uid': auid,
'id': maid,
})
response['metaannotations'] = metaannotations
return response
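# Illustrative response shape for get_annotation_info (comment only; all IDs
# and names below are invented):
#
#   {
#       'annotations': {321: 'mitral cell'},      # annotation id -> name
#       'skeletons': {42: {'annotations': [{'uid': 7, 'id': 321}]}},
#       'neuronnames': {42: 'neuron 42'},         # present if neuronnames=True
#       'metaannotations': {321: {'annotations': [{'uid': 7, 'id': 654}]}},
#                                                 # present if metaannotations=True
#   }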
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def list_skeletons(request:HttpRequest, project_id) -> JsonResponse:
"""List skeletons matching filtering criteria.
The result set is the intersection of skeletons matching criteria (the
criteria are conjunctive) unless stated otherwise.
---
parameters:
- name: created_by
description: Filter for user ID of the skeletons' creator.
type: integer
paramType: query
- name: reviewed_by
description: Filter for user ID of the skeletons' reviewer.
type: integer
paramType: query
- name: from_date
description: Filter for skeletons with nodes created after this date.
type: string
format: date
paramType: query
- name: to_date
description: Filter for skeletons with nodes created before this date.
type: string
format: date
paramType: query
- name: nodecount_gt
description: |
Filter for skeletons with more nodes than this threshold. Removes
all other criteria.
type: integer
paramType: query
type:
- type: array
items:
type: integer
description: ID of skeleton matching the criteria.
required: true
"""
created_by = request.GET.get('created_by', None)
reviewed_by = request.GET.get('reviewed_by', None)
from_date = request.GET.get('from', None)
to_date = request.GET.get('to', None)
nodecount_gt = int(request.GET.get('nodecount_gt', 0))
# Sanitize
if reviewed_by:
reviewed_by = int(reviewed_by)
if created_by:
created_by = int(created_by)
if from_date:
from_date = datetime.strptime(from_date, '%Y%m%d')
if to_date:
to_date = datetime.strptime(to_date, '%Y%m%d')
response = _list_skeletons(project_id, created_by, reviewed_by, from_date, to_date, nodecount_gt)
return JsonResponse(response, safe=False)
def _list_skeletons(project_id, created_by=None, reviewed_by=None, from_date=None,
to_date=None, nodecount_gt=0) -> List:
""" Returns a list of skeleton IDs of which nodes exist that fulfill the
given constraints (if any). It can be constrained who created nodes in this
skeleton during a given period of time. Having nodes that are reviewed by
a certain user is another constraint. And so is the node count that one can
specify which each result node must exceed.
"""
if created_by and reviewed_by:
        raise ValueError("Please specify only one of node creator and node reviewer")
params = {
'project_id': project_id,
}
if reviewed_by:
params['reviewed_by'] = reviewed_by
query = '''
SELECT DISTINCT r.skeleton_id
FROM review r
WHERE r.project_id=%(project_id)s
AND r.reviewer_id=%(reviewed_by)s
'''
if from_date:
params['from_date'] = from_date.isoformat()
query += " AND r.review_time >= %(from_date)s"
if to_date:
to_date = to_date + timedelta(days=1)
params['to_date'] = to_date.isoformat()
query += " AND r.review_time < %(to_date)s"
else:
query = '''
SELECT skeleton_id
FROM catmaid_skeleton_summary css
WHERE css.project_id=%(project_id)s
'''
if created_by:
query = '''
SELECT DISTINCT skeleton_id
FROM treenode t
WHERE t.project_id=%(project_id)s
AND t.user_id=%(created_by)s
'''
params['created_by'] = created_by
if from_date:
params['from_date'] = from_date.isoformat()
query += " AND t.creation_time >= %(from_date)s"
if to_date:
to_date = to_date + timedelta(days=1)
params['to_date'] = to_date.isoformat()
query += " AND t.creation_time < %(to_date)s"
if nodecount_gt > 0:
params['nodecount_gt'] = nodecount_gt
query = f'''
SELECT s.skeleton_id
FROM ({query}) q JOIN catmaid_skeleton_summary s
ON q.skeleton_id = s.skeleton_id
WHERE s.num_nodes > %(nodecount_gt)s
'''
cursor = connection.cursor()
cursor.execute(query, params)
return [r[0] for r in cursor.fetchall()]
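# Usage sketch (comment only; project and user IDs are invented): skeletons
# with more than 1000 nodes in which user 12 created nodes during January 2020:
#
#   skeleton_ids = _list_skeletons(project_id=1, created_by=12,
#                                  from_date=datetime(2020, 1, 1),
#                                  to_date=datetime(2020, 1, 31),
#                                  nodecount_gt=1000)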
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def adjacency_matrix(request:HttpRequest, project_id=None) -> JsonResponse:
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
p = get_object_or_404(Project, pk=project_id)
skelgroup = SkeletonGroup( skeletonlist, p.id )
nodeslist = [ {'group': 1,
'id': k,
'name': d['neuronname']} for k,d in skelgroup.graph.nodes_iter(data=True) ]
nodesid_list = [ele['id'] for ele in nodeslist]
data = {
'nodes': nodeslist,
'links': [ {'id': '%i_%i' % (u,v),
'source': nodesid_list.index(u),
'target': nodesid_list.index(v),
'value': d['count']} for u,v,d in skelgroup.graph.edges_iter(data=True) ]
}
return JsonResponse(data, json_dumps_params={'sort_keys': True, 'indent': 4})
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeletonlist_subgraph(request:HttpRequest, project_id=None) -> JsonResponse:
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
p = get_object_or_404(Project, pk=project_id)
skelgroup = SkeletonGroup( skeletonlist, p.id )
data = {
'nodes': [ {'id': str(k),
'label': str(d['baseName']),
'skeletonid': str(d['skeletonid']),
'node_count': d['node_count']
} for k,d in skelgroup.graph.nodes_iter(data=True) ],
'edges': [ {'id': '%i_%i' % (u,v),
'source': str(u),
'target': str(v),
'weight': d['count'],
'label': str(d['count']),
'directed': True} for u,v,d in skelgroup.graph.edges_iter(data=True) ]
}
return JsonResponse(data, json_dumps_params={'sort_keys': True, 'indent': 4})
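# Illustrative response shape (comment only; IDs and counts invented):
#
#   {
#       'nodes': [{'id': '42', 'label': 'neuron 42', 'skeletonid': '42',
#                  'node_count': 100}],
#       'edges': [{'id': '42_43', 'source': '42', 'target': '43',
#                  'weight': 3, 'label': '3', 'directed': True}],
#   }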
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeletonlist_confidence_compartment_subgraph(request:HttpRequest, project_id=None) -> JsonResponse:
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
confidence = int(request.POST.get('confidence_threshold', 5))
p = get_object_or_404(Project, pk=project_id)
# skelgroup = SkeletonGroup( skeletonlist, p.id )
    # split up the skeletons based on the confidence threshold
resultgraph = compartmentalize_skeletongroup_by_confidence( skeletonlist, p.id, confidence )
data = {
'nodes': [ { 'data': {'id': str(k),
'label': str(d['neuronname']),
'skeletonid': str(d['skeletonid']),
'node_count': d['node_count']} } for k,d in resultgraph.nodes_iter(data=True) ],
'edges': [ { 'data': {'id': '%s_%s' % (u,v),
'source': str(u),
'target': str(v),
'weight': d['count'],
'label': str(d['count']),
'directed': True}} for u,v,d in resultgraph.edges_iter(data=True) ]
}
return JsonResponse(data, json_dumps_params={'sort_keys': True, 'indent': 4})
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeletonlist_edgecount_compartment_subgraph(request:HttpRequest, project_id=None) -> JsonResponse:
skeletonlist = request.POST.getlist('skeleton_list[]')
skeletonlist = map(int, skeletonlist)
edgecount = int(request.POST.get('edgecount', 10))
p = get_object_or_404(Project, pk=project_id)
# skelgroup = SkeletonGroup( skeletonlist, p.id )
    # split up the skeletons based on the edge count threshold
resultgraph = compartmentalize_skeletongroup_by_edgecount( skeletonlist, p.id, edgecount )
data = {
'nodes': [ { 'data': {'id': str(k),
'label': str(d['neuronname']),
'skeletonid': str(d['skeletonid']),
'node_count': d['node_count']} } for k,d in resultgraph.nodes_iter(data=True) ],
'edges': [ { 'data': {'id': '%s_%s' % (u,v),
'source': str(u),
'target': str(v),
'weight': d['count'],
'label': str(d['count']),
'directed': True}} for u,v,d in resultgraph.edges_iter(data=True) ]
}
return JsonResponse(data, json_dumps_params={'sort_keys': True, 'indent': 4})
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def all_shared_connectors(request:HttpRequest, project_id=None) -> JsonResponse:
skeletonlist = request.POST.getlist('skeletonlist[]')
skeletonlist = map(int, skeletonlist)
p = get_object_or_404(Project, pk=project_id)
skelgroup = SkeletonGroup( skeletonlist, p.id )
return JsonResponse(dict.fromkeys(skelgroup.all_shared_connectors()))
@api_view(['GET', 'POST'])
@requires_user_role([UserRole.Browse])
def skeletons_by_node_labels(request:HttpRequest, project_id=None) -> JsonResponse:
"""Return relationship between label IDs and skeleton IDs
---
parameters:
- name: label_ids[]
description: IDs of the labels to find skeletons associated with
required: true
type: array
items:
type: integer
paramType: form
- name: label_names[]
        description: Alternative to `label_ids` to pass in a list of label names.
required: true
type: array
items:
type: string
paramType: form
type:
- type: array
items:
type: integer
description: array of [label_id, [skel_id1, skel_id2, skel_id3, ...]] tuples
required: true
"""
label_ids = get_request_list(request.POST, 'label_ids', default=[], map_fn=int)
label_names = get_request_list(request.POST, 'label_names', default=[])
if not label_ids and not label_names:
return JsonResponse([], safe=False)
label_class = Class.objects.get(project=project_id, class_name='label')
labeled_as_relation = Relation.objects.get(project=project_id, relation_name='labeled_as')
if label_names:
extra_label_ids = ClassInstance.objects.filter(project_id=project_id,
class_column=label_class, name__in=label_names).values_list('id', flat=True)
label_ids.extend(extra_label_ids)
cursor = connection.cursor()
cursor.execute("""
SELECT ci.id, array_agg(DISTINCT t.skeleton_id)
FROM treenode t
JOIN treenode_class_instance tci
ON t.id = tci.treenode_id
JOIN class_instance ci
ON tci.class_instance_id = ci.id
JOIN UNNEST(%(label_ids)s::bigint[]) label(id)
ON label.id = ci.id
WHERE ci.project_id = %(project_id)s
AND tci.relation_id = %(labeled_as)s
GROUP BY ci.id;
""", {
'label_ids': label_ids,
'project_id': int(project_id),
'labeled_as': labeled_as_relation.id
})
return JsonResponse(cursor.fetchall(), safe=False)
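# Illustrative response shape (comment only; IDs invented): one entry per label,
# pairing the label's class_instance ID with the skeletons that carry it:
#
#   [
#       [301, [2001, 2002]],
#       [302, [2003]],
#   ]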
def get_skeletons_in_bb(params) -> List:
cursor = connection.cursor()
extra_joins = []
extra_where = []
min_nodes = params.get('min_nodes', 0)
min_cable = params.get('min_cable', 0)
needs_summary = min_nodes > 0 or min_cable > 0
provider = params.get('src', 'postgis2d')
skeleton_ids = params.get('skeleton_ids')
node_query = ""
if needs_summary:
extra_joins.append("""
JOIN catmaid_skeleton_summary css
ON css.skeleton_id = skeleton.id
""")
if min_nodes > 1:
extra_where.append("""
css.num_nodes >= %(min_nodes)s
""")
if min_cable > 0:
extra_where.append("""
css.cable_length >= %(min_cable)s
""")
if skeleton_ids:
extra_joins.append("""
JOIN UNNEST(%(skeleton_ids)s::bigint[]) query_skeleton(id)
ON query_skeleton.id = skeleton.id
""")
if provider == 'postgis2d':
node_query = """
SELECT DISTINCT t.skeleton_id
FROM (
SELECT te.id, te.edge
FROM treenode_edge te
WHERE floatrange(ST_ZMin(te.edge),
ST_ZMax(te.edge), '[]') && floatrange(%(minz)s, %(maxz)s, '[)')
AND te.project_id = %(project_id)s
) e
JOIN treenode t
ON t.id = e.id
WHERE e.edge && ST_MakeEnvelope(%(minx)s, %(miny)s, %(maxx)s, %(maxy)s)
AND ST_3DDWithin(e.edge, ST_MakePolygon(ST_MakeLine(ARRAY[
ST_MakePoint(%(minx)s, %(miny)s, %(halfz)s),
ST_MakePoint(%(maxx)s, %(miny)s, %(halfz)s),
ST_MakePoint(%(maxx)s, %(maxy)s, %(halfz)s),
ST_MakePoint(%(minx)s, %(maxy)s, %(halfz)s),
ST_MakePoint(%(minx)s, %(miny)s, %(halfz)s)]::geometry[])),
%(halfzdiff)s)
"""
elif provider == 'postgis3d':
node_query = """
SELECT DISTINCT t.skeleton_id
FROM treenode_edge te
JOIN treenode t
ON t.id = te.id
WHERE te.edge &&& ST_MakeLine(ARRAY[
ST_MakePoint(%(minx)s, %(maxy)s, %(maxz)s),
ST_MakePoint(%(maxx)s, %(miny)s, %(minz)s)] ::geometry[])
AND ST_3DDWithin(te.edge, ST_MakePolygon(ST_MakeLine(ARRAY[
ST_MakePoint(%(minx)s, %(miny)s, %(halfz)s),
ST_MakePoint(%(maxx)s, %(miny)s, %(halfz)s),
ST_MakePoint(%(maxx)s, %(maxy)s, %(halfz)s),
ST_MakePoint(%(minx)s, %(maxy)s, %(halfz)s),
ST_MakePoint(%(minx)s, %(miny)s, %(halfz)s)]::geometry[])),
%(halfzdiff)s)
AND te.project_id = %(project_id)s
"""
else:
raise ValueError('Need valid node provider (src)')
if extra_where:
extra_where_val = 'WHERE ' + '\nAND '.join(extra_where)
else:
extra_where_val = ''
query = """
SELECT skeleton.id
FROM (
{node_query}
) skeleton(id)
{extra_joins}
{extra_where}
""".format(**{
'extra_joins': '\n'.join(extra_joins),
'extra_where': extra_where_val,
'node_query': node_query,
})
cursor.execute(query, params)
return [r[0] for r in cursor.fetchall()]
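# Minimal sketch of a params dict for get_skeletons_in_bb (comment only; the
# concrete values are invented, the keys mirror what this function reads and
# what the skeletons_in_bounding_box view below prepares):
#
#   params = {
#       'project_id': 1,
#       'minx': 0, 'miny': 0, 'minz': 0,
#       'maxx': 1000, 'maxy': 1000, 'maxz': 80,
#       'halfz': 40, 'halfzdiff': 40,
#       'min_nodes': 0, 'min_cable': 0,
#       'src': 'postgis2d',          # or 'postgis3d'
#       'skeleton_ids': None,        # optional subset to restrict the test to
#       'limit': 0,
#   }
#   matching = get_skeletons_in_bb(params)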
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def skeletons_in_bounding_box(request:HttpRequest, project_id) -> JsonResponse:
"""Get a list of all skeletons that intersect with the passed in bounding
    box. Optionally, only a subset of passed in skeletons can be tested against.
---
parameters:
- name: limit
description: |
Limit the number of returned nodes.
required: false
type: integer
defaultValue: 0
paramType: form
- name: minx
description: |
Minimum world space X coordinate
required: true
type: float
paramType: form
- name: miny
description: |
Minimum world space Y coordinate
required: true
type: float
paramType: form
- name: minz
description: |
Minimum world space Z coordinate
required: true
type: float
paramType: form
- name: maxx
description: |
Maximum world space X coordinate
required: true
type: float
paramType: form
- name: maxy
description: |
Maximum world space Y coordinate
required: true
type: float
paramType: form
- name: maxz
description: |
Maximum world space Z coordinate
required: true
type: float
paramType: form
- name: min_nodes
description: |
A minimum number of nodes per result skeleton
required: false
defaultValue: 0
type: float
paramType: form
- name: min_cable
description: |
          A minimum cable length per result skeleton
required: false
defaultValue: 0
type: float
paramType: form
- name: volume_id
description: |
Alternative to manual bounding box definition. The bounding box of the
volume is used.
required: false
defaultValue: 0
type: integer
paramType: form
- name: skeleton_ids
description: |
          An optional list of skeleton IDs that should be tested against. If
used, the result will only contain skeletons of this set.
required: false
defaultValue: 0
type: array
items:
type: integer
paramType: form
type:
- type: array
items:
type: integer
description: array of skeleton IDs
required: true
"""
project_id = int(project_id)
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Unsupported HTTP method: " + request.method)
params = {
'project_id': project_id,
'limit': data.get('limit', 0)
}
volume_id = data.get('volume_id')
if volume_id is not None:
volume = get_volume_details(project_id, volume_id)
bbmin, bbmax = volume['bbox']['min'], volume['bbox']['max']
params['minx'] = bbmin['x']
params['miny'] = bbmin['y']
params['minz'] = bbmin['z']
params['maxx'] = bbmax['x']
params['maxy'] = bbmax['y']
params['maxz'] = bbmax['z']
else:
        for p in ('minx', 'miny', 'minz', 'maxx', 'maxy', 'maxz'):
params[p] = float(data.get(p, 0))
params['halfzdiff'] = abs(params['maxz'] - params['minz']) * 0.5
params['halfz'] = params['minz'] + (params['maxz'] - params['minz']) * 0.5
params['min_nodes'] = int(data.get('min_nodes', 0))
params['min_cable'] = int(data.get('min_cable', 0))
params['skeleton_ids'] = get_request_list(data, 'skeleton_ids', map_fn=int)
skeleton_ids = get_skeletons_in_bb(params)
return JsonResponse(skeleton_ids, safe=False)
@api_view(['GET'])
@requires_user_role([UserRole.Browse])
def change_history(request:HttpRequest, project_id=None) -> JsonResponse:
"""Return the history of all skeletons ID changes in a project over time.
Optionally, this can be constrained by a user ID and a time window.
---
parameters:
- name: project_id
description: Project to operate in
required: true
type: integer
paramType: path
- name: initial_user_id
      description: User who caused the first change in each returned skeleton.
required: false
type: integer
paramType: form
- name: changes_after
description: |
Date of format YYYY-MM-DDTHH:mm:ss, only the date part is required.
          Limits returned history to skeleton changes after this date.
required: false
type: string
paramType: form
- name: changes_before
description: |
Date of format YYYY-MM-DDTHH:mm:ss, only the date part is required.
          Limits returned history to skeleton changes before this date.
required: false
type: string
paramType: form
- name: skeleton_ids
description: Skeleton IDs of the initial set of treenodes.
required: false
type: array
paramType: form
type:
- type: array
items:
type: string
description: |
array of arrays, each representing a unique skeleton path in
historic order, newest last.
required: true
"""
initial_user_id = request.GET.get('initial_user_id')
changes_after = request.GET.get('changes_after')
changes_before = request.GET.get('changes_before')
skeleton_ids = get_request_list(request.GET, 'skeleton_ids', map_fn=int)
init_constraints = ['project_id = %(project_id)s']
constraints = ['project_id = %(project_id)s']
if initial_user_id is not None:
init_constraints.append("cti.user_id = %(initial_user_id)s")
if changes_after:
init_constraints.append("edition_time > %(changes_after)s")
constraints.append("execution_time > %(changes_after)s")
if changes_before:
        init_constraints.append("edition_time < %(changes_before)s")
        constraints.append("execution_time < %(changes_before)s")
if skeleton_ids:
init_constraints.append('skeleton_id = ANY(ARRAY[%(skeleton_ids)s])')
if not init_constraints:
raise ValueError("Please provide at least one constraint")
# 1. Get all relevant initial transactions
# 2. Find treenode IDs modified by those transactions
# 3. Get all history and live table entries for those treenodes, ordered by
# transaction execution time, oldest last. History entries come first, live
# entries are last.
# 4. Collect all referenced skeleton IDs from ordered treenodes. This results in
# a skeleton ID path for each treenode. To reduce this to distinct paths, a
# textual representation is done for each (id:id:id…) and only distinct values
# are selected. This should allow then to get fragment skeleton ID changes
# through merges and splits.
cursor = connection.cursor()
cursor.execute("""
WITH skeleton_class AS (
SELECT id as class_id
FROM class
WHERE project_id = %(project_id)s
AND class_name = 'skeleton'
),
changed_treenodes AS (
SELECT t.id, t.skeleton_id, MIN(txid) as txid, MIN(edition_time) as edition_time
FROM (
/* Deleted skeletons from history */
                -- TODO: It might be that we really want the initial
-- versions, and not all of them with each having the
-- minimum th.txid and edit time.
SELECT th.id as id, th.skeleton_id as skeleton_id, MIN(th.txid) as txid,
MIN(th.edition_time) AS edition_time
FROM treenode__history th
/* where th.exec_transaction_id = txs.transaction_id */
{init_constraints}
GROUP By th.id, th.skeleton_id
UNION ALL
/* Current skeletons */
select t.id as id, t.skeleton_id as skeleton_id, MIN(t.txid) as txid,
MIN(t.edition_time) AS edition_time
FROM treenode t
/* where t.txid = txs.transaction_id */
{init_constraints}
GROUP BY t.id, t.skeleton_id
) t
GROUP BY id, t.skeleton_id
),
all_changed_skeletons AS (
SELECT ct.id, ct.skeleton_id, -1 as pos, ct.edition_time, ct.txid
FROM changed_treenodes ct
UNION
SELECT th.id as treenode_id, th.skeleton_id, 0 as pos, th.edition_time, th.txid as txid
FROM changed_treenodes ct
JOIN treenode__history th
ON th.id = ct.id
WHERE th.txid > ct.txid
UNION
SELECT t.id, t.skeleton_id, 1 as pos, t.edition_time, t.txid
FROM changed_treenodes ct
JOIN treenode t
ON t.id = ct.id
WHERE t.txid > ct.txid
),
agg_skeletons AS (
SELECT string_agg(skeleton_id::text, ':' ORDER BY pos ASC, txid ASC) as key,
array_agg(skeleton_id ORDER BY pos ASC, txid ASC) AS skeleton_ids,
array_agg(txid ORDER BY pos ASC, txid ASC) AS txids,
max(pos) as present
/*array_agg(edition_time ORDER BY pos ASC, txid ASC) AS times*/
FROM all_changed_skeletons
GROUP BY id
)
/*
,agg_treenodes AS (
SELECT key, skeleton_ids[1]::text || '-' || skeleton_ids[array_length(skeleton_ids, 1)]::text as begin_end, count(*) as c, skeleton_ids, max(present) as present
FROM agg_skeletons
GROUP BY key, skeleton_ids
ORDER BY key
)
*/
SELECT skeleton_ids, count(*), max(present) FROM agg_skeletons
GROUP BY key, skeleton_ids
ORDER BY skeleton_ids[0], count(*) DESC;
/*
SELECT begin_end, SUM(c), max(present) from agg_treenodes
GROUP BY begin_end, skeleton_ids[1], skeleton_ids[array_length(skeleton_ids, 1)]
ORDER by skeleton_ids[1], sum(c) desc;
*/
""".format(**{
'init_constraints': ('WHERE ' if init_constraints else '') + ' AND '.join(init_constraints),
'constraints': ('WHERE ' if constraints else '') + ' AND '.join(constraints),
}), {
'project_id': project_id,
'initial_user_id': initial_user_id,
'changes_after': changes_after,
'changes_before': changes_before,
'skeleton_ids': skeleton_ids,
})
return JsonResponse(cursor.fetchall(), safe=False)
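# Reading aid for the response (comment only): each returned row is
# (skeleton_ids, count, max_present), where skeleton_ids is a treenode's
# skeleton ID path in historic order (newest last), count is the number of
# treenodes sharing that exact path, and max_present is 1 when the path
# includes an entry from the live treenode table.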
@api_view(['GET', 'POST'])
@requires_user_role(UserRole.Browse)
def import_info(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get information on imported nodes of a set of skeletons.
---
parameters:
- name: project_id
description: Project to operate in
type: integer
paramType: path
required: true
- name: skeleton_ids[]
description: IDs of skeletons to get import information for.
required: true
type: array
items:
type: integer
paramType: form
- name: with_treenodes
description: Whether to include IDs of all imported nodes in response.
type: boolean
paramType: form
required: false
defaultValue: false
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Invalid HTTP method: " + request.method)
skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError('Need at least one skeleton ID')
with_treenodes = get_request_bool(data, 'with_treenodes', False)
if with_treenodes:
tn_projection = ', imported_treenodes'
tn_aggregate = ', array_agg(t.id) AS imported_treenodes'
else:
tn_projection = ''
tn_aggregate = ''
# For each passed in skeleton, select the initial transaction for each of
# its treenodes (i.e. when they were created) and find the ones that are
# labeled as "skeletons.import".
#
    # Technically we need to add the following line to also check for a matching
    # execution time in case transaction wraparound recycled transaction IDs.
    # This is no present danger, but should be employed at some point. The
    # problem is that the catmaid_transaction_info timestamp doesn't
    # presently match the Django model defaults. See catmaid/catmaid#1949. It
# should however be no problem to enable this for SWC imports, since they
# don't use Django's ORM.
cursor = connection.cursor()
cursor.execute("""
SELECT query.skeleton_id, n_imported_treenodes {tn_projection}
FROM UNNEST(%(skeleton_ids)s::bigint[]) query(skeleton_id)
JOIN catmaid_skeleton_summary css
ON css.skeleton_id = query.skeleton_id
JOIN LATERAL (
SELECT COUNT(*) AS n_imported_treenodes {tn_aggregate}
FROM treenode t
JOIN LATERAL (
-- Get original transaction ID and creation time
SELECT txid, creation_time
FROM treenode__with_history th
WHERE th.id = t.id
ORDER BY edition_time ASC
LIMIT 1
) t_origin
ON TRUE
JOIN LATERAL (
SELECT label
FROM catmaid_transaction_info cti
WHERE cti.transaction_id = t_origin.txid
--TODO: Add transaction ID wraparound match protection. See
--comment above query.
--AND cti.execution_time = t_origin.creation_time
) t_origin_label
ON t_origin_label.label = 'skeletons.import'
WHERE t.skeleton_id = query.skeleton_id
) sub
ON TRUE
WHERE css.project_id = %(project_id)s
""".format(**{
'tn_projection': tn_projection,
'tn_aggregate': tn_aggregate,
}), {
'project_id': project_id,
'skeleton_ids': skeleton_ids,
})
if with_treenodes:
import_info = dict((c1, {
'n_imported_treenodes': c2,
'imported_treenodes': c3
}) for c1, c2, c3 in cursor.fetchall())
else:
import_info = dict((c1, {
'n_imported_treenodes': c2,
}) for c1, c2 in cursor.fetchall())
return JsonResponse(import_info)
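# Illustrative response shapes (comment only; IDs invented):
#
#   # with_treenodes = false
#   {1234: {'n_imported_treenodes': 42}}
#
#   # with_treenodes = true
#   {1234: {'n_imported_treenodes': 42, 'imported_treenodes': [8001, 8002]}}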
@api_view(['GET', 'POST'])
@requires_user_role([UserRole.Browse])
def completeness(request:HttpRequest, project_id) -> JsonResponse:
"""Obtain completeness information for a set of skeleton IDs.
---
    parameters:
- name: project_id
description: Project of skeletons
type: integer
paramType: path
required: true
- name: skeleton_ids
description: IDs of the skeletons to get completeness for
required: true
type: array
items:
type: integer
paramType: form
- name: open_ends_percent
        description: The allowed ratio of open ends, in range [0,1].
type: float
paramType: form
default: 0.03
required: false
- name: min_nodes
description: |
The minimum number of nodes a complete skeleton has to have, e.g. to
exclude fragments.
type: int
paramType: form
default: 500
required: false
- name: min_cable
description: |
The minimum cable length in nm a complete skeleton has to have, e.g.
to exclude fragments.
type: float
paramType: form
default: 0
required: false
- name: ignore_fragments
description: |
Whether skeletons without a node tagged 'soma' or 'out to nerve'
should be ignored.
type: bool
paramType: form
default: true
required: false
"""
if request.method == 'GET':
data = request.GET
elif request.method == 'POST':
data = request.POST
else:
raise ValueError("Unsupported HTTP method: " + request.method)
skeleton_ids = get_request_list(data, 'skeleton_ids', map_fn=int)
if not skeleton_ids:
raise ValueError('Need at least one skeleton ID')
open_ends_percent = min(max(float(data.get('open_ends_percent', 0.03)), 0.0), 1.0)
min_nodes = max(0, int(data.get('min_nodes', 500)))
min_cable = max(0.0, float(data.get('min_cable', 0)))
ignore_fragments = get_request_bool(data, 'ignore_fragments', True)
completeness = get_completeness_data(project_id, skeleton_ids,
open_ends_percent, min_nodes, min_cable, ignore_fragments)
return JsonResponse(completeness, safe=False)
def get_completeness_data(project_id, skeleton_ids, open_ends_percent=0.03,
min_nodes=500, min_cable=0, ignore_fragments=True, include_data=False):
"""Obtain completeness information for a set of skeleton IDs. Returns a two
element list containing the input skeleton ID and a boolean, indicating
whether the skeleton can be considered complete.If <include_data> is True,
A skeleton is considered complete if it fullfills all the constraints
defined through parameters.
The <open_ends_percent> is a value in range [0,1] and represents the ratio
of open leaf nodes versus total leaf nodes. As leaf nodes are all nodes
counted that don't have child nodes plus the root node.
"""
if not project_id or not skeleton_ids:
raise ValueError('Need project ID and skeleton IDs')
classes = get_class_to_id_map(project_id, ['label'])
relations = get_relation_to_id_map(project_id, ['labeled_as'])
tests = []
extra_joins = []
params = {
'project_id': project_id,
'skeleton_ids': skeleton_ids,
'min_nodes': min_nodes,
'min_cable': min_cable,
'ignore_fragments': ignore_fragments,
'open_ends_percent': open_ends_percent,
'labeled_as': relations['labeled_as'],
}
extra_select = []
if include_data:
extra_select = ['css.num_nodes', 'css.cable_length', 'n_open_ends',
'n_ends', 'is_fragment']
if open_ends_percent < 1.0 or include_data:
tests.append('open_end_ratio < %(open_ends_percent)s')
params['end_label_ci_ids'] = list(ClassInstance.objects.filter(
project_id=project_id, class_column_id=classes['label'],
name__in=tracing.end_tags).values_list('id', flat=True))
extra_joins.append("""
-- Compute open end ratio
LEFT JOIN LATERAL (
SELECT n_open_ends::float / n_ends::float, n_open_ends, n_ends
FROM (
-- Tagged end nodes in skeleton
SELECT COUNT(*) AS n_ends,
COUNT(*) FILTER (WHERE n_tags = 0) AS n_open_ends
FROM (
SELECT t.id, COUNT(*) FILTER (WHERE tci.id IS NOT NULL) AS n_tags
FROM treenode t
LEFT JOIN treenode c
ON c.parent_id = t.id
LEFT JOIN treenode_class_instance tci
ON tci.treenode_id = t.id
AND tci.relation_id = %(labeled_as)s
AND tci.class_instance_id = ANY(%(end_label_ci_ids)s::bigint[])
WHERE (c.id IS NULL OR t.parent_id IS NULL)
AND t.skeleton_id = skeleton.id
-- Needed, because treenodes can have multiple tags
GROUP BY t.id
) categorized_ends(node_id, n_tags)
) end_counts(n_ends, n_open_ends)
) end_info(open_end_ratio, n_open_ends, n_ends)
ON TRUE
""")
if min_nodes > 0:
tests.append('css.num_nodes >= %(min_nodes)s')
if min_cable > 0:
tests.append('css.cable_length >= %(min_cable)s')
if ignore_fragments or include_data:
if ignore_fragments:
tests.append('NOT is_fragment')
# Find all skeleton IDs in the query set that have treenodes tagged with
# "soma" or "out to nerve"
non_fragment_tags = ['soma', 'out to nerve']
params['non_fragment_ci_ids'] = list(ClassInstance.objects.filter(
project_id=project_id, class_column_id=classes['label'],
name__in=non_fragment_tags).values_list('id', flat=True))
extra_joins.append("""
LEFT JOIN LATERAL (
SELECT NOT EXISTS(
SELECT 1
FROM treenode_class_instance tci
JOIN treenode t
ON t.id = tci.treenode_id
JOIN class_instance ci
ON ci.id = tci.class_instance_id
WHERE ci.id = ANY(%(non_fragment_ci_ids)s::bigint[])
AND t.skeleton_id = skeleton.id
AND tci.relation_id = %(labeled_as)s
)
) soma_labels(is_fragment)
ON TRUE
""")
# Default return value without filters.
if not tests:
tests.append('TRUE')
cursor = connection.cursor()
cursor.execute("""
SELECT skeleton.id,
{is_complete}
{extra_select}
FROM catmaid_skeleton_summary css
JOIN UNNEST(%(skeleton_ids)s::bigint[]) skeleton(id)
ON skeleton.id = css.skeleton_id
{extra_joins}
""".format(**{
'is_complete': ' AND '.join(tests),
'extra_select': (',' + ', '.join(extra_select)) if extra_select else '',
'extra_joins': '\n'.join(extra_joins),
}), params)
return cursor.fetchall()
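# Usage sketch (comment only; project and skeleton IDs are invented):
#
#   rows = get_completeness_data(project_id, [101, 102],
#                                open_ends_percent=0.03, min_nodes=500,
#                                min_cable=0, ignore_fragments=True)
#   complete_skeleton_ids = [r[0] for r in rows if r[1]]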
| gpl-3.0 | -7,145,342,037,780,130,000 | 38.308575 | 176 | 0.597041 | false |
kosystem/Outliner | layout.py | 1 | 4571 | import wx
import random
from wx.lib.expando import ExpandoTextCtrl, EVT_ETC_LAYOUT_NEEDED
def randColor():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
return (r, g, b)
class TextPanel(wx.Panel):
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
self.textCtrl = ExpandoTextCtrl(
self,
size=(parent.Size[0]-20, -1),
style=wx.BORDER_NONE |
wx.TE_NO_VSCROLL)
# self.Bind(EVT_ETC_LAYOUT_NEEDED, self.OnRefit, self.textCtrl)
self.textCtrl.SetBackgroundColour('#e0e0e0')
box = wx.BoxSizer()
box.Add(self.textCtrl, 1, wx.EXPAND, 0)
self.SetSizer(box)
self.textCtrl.AppendText('1234 56 7890123 45678901234567912')
class RulerPanel(wx.Panel):
def __init__(self, parent, id, style=0):
wx.Panel.__init__(self, parent, id)
self.style = style
self.text = wx.StaticText(self, -1, '0', (40, 60))
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.SetMinSize((20, 30))
def OnPaint(self, event):
# anti aliasing
pdc = wx.PaintDC(self)
try:
dc = wx.GCDC(pdc)
except:
dc = pdc
dc.SetPen(wx.Pen('#a0a0a0'))
heightMargin = 10
width, height = self.Size[0], self.Size[1]
if self.style == 1: # Top
dc.DrawLine(0, heightMargin, width, heightMargin)
dc.DrawLine(width/2, heightMargin, width/2, height)
elif self.style == 2: # Mid
dc.DrawLine(width/2, 0, width/2, height)
dc.DrawLine(width/2, heightMargin, width, heightMargin)
elif self.style == 3: # Bottom
dc.DrawLine(width/2, 0, width/2, heightMargin)
dc.DrawLine(width/2, heightMargin, width, heightMargin)
else: # Single
dc.DrawLine(0, heightMargin, width, heightMargin)
class ContentsPanel(wx.Panel):
def __init__(self, parent, id, pos=(0, 0), size=(100, 50), style=0):
wx.Panel.__init__(self, parent, id, pos=pos, size=size)
self.SetBackgroundColour('#e0e0e0')
rulerPanel = RulerPanel(self, -1, style=style)
textPanel = TextPanel(self, -1)
hbox = wx.BoxSizer()
hbox.Add(rulerPanel, 0, wx.EXPAND, 0)
hbox.Add(textPanel, 1, wx.EXPAND | wx.TOP | wx.BOTTOM, 2)
self.SetSizer(hbox)
self.Bind(EVT_ETC_LAYOUT_NEEDED, self.OnRefit, textPanel.textCtrl)
self.OnRefit(EVT_ETC_LAYOUT_NEEDED)
def OnRefit(self, evt):
self.Fit()
# print 'Contents:', self.Size
class TreeLayout(wx.Panel):
def __init__(self, parent, id, text, style=0):
wx.Panel.__init__(self, parent, id)
self.SetBackgroundColour('#ededed')
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(hbox)
contentPanel = ContentsPanel(self, -1, size=(200, -1), style=style)
self.subPanel = wx.Panel(self, -1)
self.subPanel.SetBackgroundColour(randColor())
hbox.Add(contentPanel, 0, wx.EXPAND | wx.ALL, 0)
hbox.Add(self.subPanel, 1, wx.EXPAND | wx.ALL, 0)
self.contents = wx.BoxSizer(wx.VERTICAL)
self.subPanel.SetSizer(self.contents)
class TreeLayoutFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, size=(500, 400))
panel = wx.Panel(self, -1)
panel.SetBackgroundColour(randColor())
vbox = wx.BoxSizer(wx.VERTICAL)
midPan1 = TreeLayout(panel, -1, '1', style=1)
midPan2 = TreeLayout(panel, -1, '2', style=3)
midPan11 = TreeLayout(midPan1.subPanel, -1, '11')
midPan21 = TreeLayout(midPan2.subPanel, -1, '21', style=1)
midPan22 = TreeLayout(midPan2.subPanel, -1, '22', style=2)
midPan23 = TreeLayout(midPan2.subPanel, -1, '23', style=2)
midPan24 = TreeLayout(midPan2.subPanel, -1, '24', style=3)
midPan1.contents.Add(midPan11, 1, wx.EXPAND | wx.ALL, 0)
midPan2.contents.Add(midPan21, 1, wx.EXPAND | wx.ALL, 0)
midPan2.contents.Add(midPan22, 1, wx.EXPAND | wx.ALL, 0)
midPan2.contents.Add(midPan23, 1, wx.EXPAND | wx.ALL, 0)
midPan2.contents.Add(midPan24, 1, wx.EXPAND | wx.ALL, 0)
vbox.Add(midPan1, 0, wx.EXPAND | wx.ALL, 0)
vbox.Add(midPan2, 0, wx.EXPAND | wx.ALL, 0)
panel.SetSizer(vbox)
self.Centre()
self.Show(True)
if __name__ == '__main__':
app = wx.App()
TreeLayoutFrame(None, -1, 'layout.py')
app.MainLoop()
| mit | -6,932,235,869,819,650,000 | 32.610294 | 75 | 0.591555 | false |
mitsei/dlkit | dlkit/abstract_osid/learning/queries.py | 1 | 61163 | """Implementations of learning abstract base class queries."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class ObjectiveQuery:
"""This is the query for searching objectives.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
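    # Illustrative sketch (comment only): a consumer holding a concrete
    # ObjectiveQuery implementation, with `query` and `assessment_id` assumed
    # to be obtained elsewhere, might combine terms like this:
    #
    #   query.match_assessment_id(assessment_id, match=True)
    #   query.match_any_activity(True)
    #   if query.supports_assessment_query():
    #       assessment_query = query.get_assessment_query()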
@abc.abstractmethod
def match_assessment_id(self, assessment_id, match):
"""Sets the assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_id_terms(self):
"""Clears the assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_id_terms = property(fdel=clear_assessment_id_terms)
@abc.abstractmethod
def supports_assessment_query(self):
"""Tests if an ``AssessmentQuery`` is available for querying activities.
:return: ``true`` if an assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_query(self):
"""Gets the query for an assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
@abc.abstractmethod
def match_any_assessment(self, match):
"""Matches an objective that has any assessment assigned.
:param match: ``true`` to match objectives with any assessment, ``false`` to match objectives with no assessment
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_terms(self):
"""Clears the assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_terms = property(fdel=clear_assessment_terms)
@abc.abstractmethod
def match_knowledge_category_id(self, grade_id, match):
"""Sets the knowledge category ``Id`` for this query.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_knowledge_category_id_terms(self):
"""Clears the knowledge category ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
knowledge_category_id_terms = property(fdel=clear_knowledge_category_id_terms)
@abc.abstractmethod
def supports_knowledge_category_query(self):
"""Tests if a ``GradeQuery`` is available for querying knowledge categories.
:return: ``true`` if a grade query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_knowledge_category_query(self):
"""Gets the query for a knowledge category.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_knowledge_category_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_knowledge_category_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
knowledge_category_query = property(fget=get_knowledge_category_query)
@abc.abstractmethod
def match_any_knowledge_category(self, match):
"""Matches an objective that has any knowledge category.
:param match: ``true`` to match objectives with any knowledge category, ``false`` to match objectives with no knowledge category
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_knowledge_category_terms(self):
"""Clears the knowledge category terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
knowledge_category_terms = property(fdel=clear_knowledge_category_terms)
@abc.abstractmethod
def match_cognitive_process_id(self, grade_id, match):
"""Sets the cognitive process ``Id`` for this query.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_cognitive_process_id_terms(self):
"""Clears the cognitive process ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
cognitive_process_id_terms = property(fdel=clear_cognitive_process_id_terms)
@abc.abstractmethod
def supports_cognitive_process_query(self):
"""Tests if a ``GradeQuery`` is available for querying cognitive processes.
:return: ``true`` if a grade query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_cognitive_process_query(self):
"""Gets the query for a cognitive process.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_cognitive_process_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_cognitive_process_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
cognitive_process_query = property(fget=get_cognitive_process_query)
@abc.abstractmethod
def match_any_cognitive_process(self, match):
"""Matches an objective that has any cognitive process.
:param match: ``true`` to match objectives with any cognitive process, ``false`` to match objectives with no cognitive process
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_cognitive_process_terms(self):
"""Clears the cognitive process terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
cognitive_process_terms = property(fdel=clear_cognitive_process_terms)
@abc.abstractmethod
def match_activity_id(self, activity_id, match):
"""Sets the activity ``Id`` for this query.
:param activity_id: an activity ``Id``
:type activity_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``activity_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_activity_id_terms(self):
"""Clears the activity ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
activity_id_terms = property(fdel=clear_activity_id_terms)
@abc.abstractmethod
def supports_activity_query(self):
"""Tests if an ``ActivityQuery`` is available for querying activities.
:return: ``true`` if an activity query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_activity_query(self):
"""Gets the query for an activity.
Multiple retrievals produce a nested ``OR`` term.
:return: the activity query
:rtype: ``osid.learning.ActivityQuery``
:raise: ``Unimplemented`` -- ``supports_activity_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_query()`` is ``true``.*
"""
return # osid.learning.ActivityQuery
activity_query = property(fget=get_activity_query)
@abc.abstractmethod
def match_any_activity(self, match):
"""Matches an objective that has any related activity.
:param match: ``true`` to match objectives with any activity, ``false`` to match objectives with no activity
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_activity_terms(self):
"""Clears the activity terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
activity_terms = property(fdel=clear_activity_terms)
@abc.abstractmethod
def match_requisite_objective_id(self, requisite_objective_id, match):
"""Sets the requisite objective ``Id`` for this query.
:param requisite_objective_id: a requisite objective ``Id``
:type requisite_objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``requisite_objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_requisite_objective_id_terms(self):
"""Clears the requisite objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
requisite_objective_id_terms = property(fdel=clear_requisite_objective_id_terms)
@abc.abstractmethod
def supports_requisite_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available for querying requisite objectives.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_requisite_objective_query(self):
"""Gets the query for a requisite objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_requisite_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_requisite_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
requisite_objective_query = property(fget=get_requisite_objective_query)
@abc.abstractmethod
def match_any_requisite_objective(self, match):
"""Matches an objective that has any related requisite.
:param match: ``true`` to match objectives with any requisite, ``false`` to match objectives with no requisite
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_requisite_objective_terms(self):
"""Clears the requisite objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
requisite_objective_terms = property(fdel=clear_requisite_objective_terms)
@abc.abstractmethod
def match_dependent_objective_id(self, dependent_objective_id, match):
"""Sets the dependent objective ``Id`` to query objectives dependent on the given objective.
:param dependent_objective_id: a dependent objective ``Id``
:type dependent_objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``dependent_objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_dependent_objective_id_terms(self):
"""Clears the dependent objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
dependent_objective_id_terms = property(fdel=clear_dependent_objective_id_terms)
@abc.abstractmethod
def supports_depndent_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available for querying dependent objectives.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_dependent_objective_query(self):
"""Gets the query for a dependent objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_dependent_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_dependent_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
dependent_objective_query = property(fget=get_dependent_objective_query)
@abc.abstractmethod
def match_any_dependent_objective(self, match):
"""Matches an objective that has any related dependents.
:param match: ``true`` to match objectives with any dependent, ``false`` to match objectives with no dependents
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_dependent_objective_terms(self):
"""Clears the dependent objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
dependent_objective_terms = property(fdel=clear_dependent_objective_terms)
@abc.abstractmethod
def match_equivalent_objective_id(self, equivalent_objective_id, match):
"""Sets the equivalent objective ``Id`` to query equivalents.
:param equivalent_objective_id: an equivalent objective ``Id``
:type equivalent_objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``equivalent_objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_equivalent_objective_id_terms(self):
"""Clears the equivalent objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
equivalent_objective_id_terms = property(fdel=clear_equivalent_objective_id_terms)
@abc.abstractmethod
def supports_equivalent_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available for querying equivalent objectives.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_equivalent_objective_query(self):
"""Gets the query for an equivalent objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_equivalent_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_equivalent_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
equivalent_objective_query = property(fget=get_equivalent_objective_query)
@abc.abstractmethod
def match_any_equivalent_objective(self, match):
"""Matches an objective that has any related equivalents.
:param match: ``true`` to match objectives with any equivalent, ``false`` to match objectives with no equivalents
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_equivalent_objective_terms(self):
"""Clears the equivalent objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
equivalent_objective_terms = property(fdel=clear_equivalent_objective_terms)
@abc.abstractmethod
def match_ancestor_objective_id(self, objective_id, match):
"""Sets the objective ``Id`` for this query to match objectives that have the specified objective as an ancestor.
:param objective_id: an objective ``Id``
:type objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_objective_id_terms(self):
"""Clears the ancestor objective ``Id`` query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_objective_id_terms = property(fdel=clear_ancestor_objective_id_terms)
@abc.abstractmethod
def supports_ancestor_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_ancestor_objective_query(self):
"""Gets the query for an objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_ancestor_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_ancestor_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
ancestor_objective_query = property(fget=get_ancestor_objective_query)
@abc.abstractmethod
def match_any_ancestor_objective(self, match):
"""Matches objectives that have any ancestor.
:param match: ``true`` to match objective with any ancestor, ``false`` to match root objectives
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_objective_terms(self):
"""Clears the ancestor objective query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_objective_terms = property(fdel=clear_ancestor_objective_terms)
@abc.abstractmethod
def match_descendant_objective_id(self, objective_id, match):
"""Sets the objective ``Id`` for this query to match objectives that have the specified objective as a descendant.
:param objective_id: an objective ``Id``
:type objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_objective_id_terms(self):
"""Clears the descendant objective ``Id`` query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_objective_id_terms = property(fdel=clear_descendant_objective_id_terms)
@abc.abstractmethod
def supports_descendant_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_descendant_objective_query(self):
"""Gets the query for an objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_descendant_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_descendant_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
descendant_objective_query = property(fget=get_descendant_objective_query)
@abc.abstractmethod
def match_any_descendant_objective(self, match):
"""Matches objectives that have any ancestor.
:param match: ``true`` to match objectives with any ancestor, ``false`` to match leaf objectives
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_objective_terms(self):
"""Clears the descendant objective query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_objective_terms = property(fdel=clear_descendant_objective_terms)
@abc.abstractmethod
def match_objective_bank_id(self, objective_bank_id, match):
"""Sets the objective bank ``Id`` for this query.
:param objective_bank_id: an objective bank ``Id``
:type objective_bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_bank_id_terms(self):
"""Clears the objective bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_bank_id_terms = property(fdel=clear_objective_bank_id_terms)
@abc.abstractmethod
def supports_objective_bank_query(self):
"""Tests if a ``ObjectiveBankQuery`` is available for querying objective banks.
:return: ``true`` if an objective bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_bank_query(self):
"""Gets the query for an objective bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective bank query
:rtype: ``osid.learning.ObjectiveBankQuery``
:raise: ``Unimplemented`` -- ``supports_objective_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankQuery
objective_bank_query = property(fget=get_objective_bank_query)
@abc.abstractmethod
def clear_objective_bank_terms(self):
"""Clears the objective bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_bank_terms = property(fdel=clear_objective_bank_terms)
@abc.abstractmethod
def get_objective_query_record(self, objective_record_type):
"""Gets the objective query record corresponding to the given ``Objective`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param objective_record_type: an objective query record type
:type objective_record_type: ``osid.type.Type``
:return: the objective query record
:rtype: ``osid.learning.records.ObjectiveQueryRecord``
:raise: ``NullArgument`` -- ``objective_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(objective_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ObjectiveQueryRecord
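# Illustrative sketch (not part of the OSID specification above): how a
# consumer might combine ObjectiveQuery terms. Per the class contract, each
# distinct matcher adds an AND term, while repeated calls to the same matcher
# nest as OR. The names `session`, `algebra_root_id` and `geometry_root_id`
# are hypothetical placeholders (e.g. an objective query session and two
# osid.id.Id values); they are assumptions, not defined in this module.
#
#     query = session.get_objective_query()
#     query.match_ancestor_objective_id(algebra_root_id, match=True)
#     query.match_ancestor_objective_id(geometry_root_id, match=True)  # ORed with the previous term
#     query.match_any_dependent_objective(True)                        # ANDed with the ancestor terms
#     objectives = session.get_objectives_by_query(query)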
class ActivityQuery:
"""This is the query for searching activities.
Each method match request produces an ``AND`` term while multiple
    invocations of a method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_objective_id(self, objective_id, match):
"""Sets the objective ``Id`` for this query.
:param objective_id: an objective ``Id``
:type objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_id_terms(self):
"""Clears the objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_id_terms = property(fdel=clear_objective_id_terms)
@abc.abstractmethod
def supports_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available for querying objectives.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_query(self):
"""Gets the query for an objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
objective_query = property(fget=get_objective_query)
@abc.abstractmethod
def clear_objective_terms(self):
"""Clears the objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_terms = property(fdel=clear_objective_terms)
@abc.abstractmethod
def match_asset_id(self, asset_id, match):
"""Sets the asset ``Id`` for this query.
:param asset_id: an asset ``Id``
:type asset_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``asset_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_asset_id_terms(self):
"""Clears the asset ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
asset_id_terms = property(fdel=clear_asset_id_terms)
@abc.abstractmethod
def supports_asset_query(self):
"""Tests if an ``AssetQuery`` is available for querying objectives.
:return: ``true`` if an robjective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_asset_query(self):
"""Gets the query for an asset.
Multiple retrievals produce a nested ``OR`` term.
:return: the asset query
:rtype: ``osid.repository.AssetQuery``
:raise: ``Unimplemented`` -- ``supports_asset_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_asset_query()`` is ``true``.*
"""
return # osid.repository.AssetQuery
asset_query = property(fget=get_asset_query)
@abc.abstractmethod
def match_any_asset(self, match):
"""Matches an activity that has any objective assigned.
:param match: ``true`` to match activities with any asset, ``false`` to match activities with no asset
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_asset_terms(self):
"""Clears the asset terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
asset_terms = property(fdel=clear_asset_terms)
@abc.abstractmethod
def match_course_id(self, course_id, match):
"""Sets the course ``Id`` for this query.
:param course_id: a course ``Id``
:type course_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``course_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_course_id_terms(self):
"""Clears the course ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
course_id_terms = property(fdel=clear_course_id_terms)
@abc.abstractmethod
def supports_course_query(self):
"""Tests if a ``CourseQuery`` is available for querying courses.
:return: ``true`` if a course query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_course_query(self):
"""Gets the query for a course.
Multiple retrievals produce a nested ``OR`` term.
:return: the course query
:rtype: ``osid.course.CourseQuery``
:raise: ``Unimplemented`` -- ``supports_course_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_course_query()`` is ``true``.*
"""
return # osid.course.CourseQuery
course_query = property(fget=get_course_query)
@abc.abstractmethod
def match_any_course(self, match):
"""Matches an activity that has any course assigned.
:param match: ``true`` to match activities with any courses, ``false`` to match activities with no courses
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_course_terms(self):
"""Clears the course terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
course_terms = property(fdel=clear_course_terms)
@abc.abstractmethod
def match_assessment_id(self, assessment_id, match):
"""Sets the assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_id_terms(self):
"""Clears the assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_id_terms = property(fdel=clear_assessment_id_terms)
@abc.abstractmethod
def supports_assessment_query(self):
"""Tests if an ``AssessmentQuery`` is available for querying assessments.
:return: ``true`` if an assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_query(self):
"""Gets the query for a assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
@abc.abstractmethod
def match_any_assessment(self, match):
"""Matches an activity that has any assessment assigned.
:param match: ``true`` to match activities with any assessments, ``false`` to match activities with no assessments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_terms(self):
"""Clears the assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_terms = property(fdel=clear_assessment_terms)
@abc.abstractmethod
def match_objective_bank_id(self, objective_bank_id, match):
"""Sets the objective bank ``Id`` for this query.
:param objective_bank_id: an objective bank ``Id``
:type objective_bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_bank_id_terms(self):
"""Clears the objective bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_bank_id_terms = property(fdel=clear_objective_bank_id_terms)
@abc.abstractmethod
def supports_objective_bank_query(self):
"""Tests if a ``ObjectiveBankQuery`` is available for querying resources.
:return: ``true`` if an objective bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_bank_query(self):
"""Gets the query for an objective bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective bank query
:rtype: ``osid.learning.ObjectiveBankQuery``
:raise: ``Unimplemented`` -- ``supports_objective_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankQuery
objective_bank_query = property(fget=get_objective_bank_query)
@abc.abstractmethod
def clear_objective_bank_terms(self):
"""Clears the objective bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_bank_terms = property(fdel=clear_objective_bank_terms)
@abc.abstractmethod
def get_activity_query_record(self, activity_record_type):
"""Gets the activity query record corresponding to the given ``Activity`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param activity_record_type: an activity query record type
:type activity_record_type: ``osid.type.Type``
:return: the activity query record
:rtype: ``osid.learning.records.ActivityQueryRecord``
:raise: ``NullArgument`` -- ``activity_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(activity_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ActivityQueryRecord
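# Illustrative sketch (not part of the OSID specification above): a possible
# ActivityQuery use, finding activities tied to one objective that also carry
# at least one asset. `session` and `objective_id` are hypothetical
# placeholders (an activity query session and an osid.id.Id), not defined here.
#
#     query = session.get_activity_query()
#     query.match_objective_id(objective_id, match=True)
#     query.match_any_asset(True)
#     activities = session.get_activities_by_query(query)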
class ProficiencyQuery:
"""This is the query for searching proficiencies.
Each method match specifies an ``AND`` term while multiple
invocations of the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_resource_id(self, resource_id, match):
"""Sets the resource ``Id`` for this query.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:param match: ``true`` if a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_resource_id_terms(self):
"""Clears the resource ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
resource_id_terms = property(fdel=clear_resource_id_terms)
@abc.abstractmethod
def supports_resource_query(self):
"""Tests if a ``ResourceQuery`` is available.
:return: ``true`` if a resource query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_resource_query(self):
"""Gets the query for a resource.
Multiple retrievals produce a nested ``OR`` term.
:return: the resource query
:rtype: ``osid.resource.ResourceQuery``
:raise: ``Unimplemented`` -- ``supports_resource_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_query()`` is ``true``.*
"""
return # osid.resource.ResourceQuery
resource_query = property(fget=get_resource_query)
@abc.abstractmethod
def clear_resource_terms(self):
"""Clears the resource terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
resource_terms = property(fdel=clear_resource_terms)
@abc.abstractmethod
def match_objective_id(self, objective_id, match):
"""Sets the objective ``Id`` for this query.
:param objective_id: an objective ``Id``
:type objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_id_terms(self):
"""Clears the objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_id_terms = property(fdel=clear_objective_id_terms)
@abc.abstractmethod
def supports_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available for querying objectives.
        :return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_query(self):
"""Gets the query for an objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
objective_query = property(fget=get_objective_query)
@abc.abstractmethod
def match_any_objective(self, match):
"""Matches an activity that has any objective assigned.
:param match: ``true`` to match activities with any objective, ``false`` to match activities with no objective
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_terms(self):
"""Clears the objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_terms = property(fdel=clear_objective_terms)
@abc.abstractmethod
def match_completion(self, start, end, match):
"""Sets the completion for this query to match completion percentages between the given range inclusive.
:param start: start of range
:type start: ``decimal``
:param end: end of range
:type end: ``decimal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``end`` is less than ``start``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_completion_terms(self):
"""Clears the completion terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
completion_terms = property(fdel=clear_completion_terms)
@abc.abstractmethod
def match_minimum_completion(self, completion, match):
"""Sets the minimum completion for this query.
:param completion: completion percentage
:type completion: ``decimal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_minimum_completion_terms(self):
"""Clears the minimum completion terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
minimum_completion_terms = property(fdel=clear_minimum_completion_terms)
@abc.abstractmethod
def match_level_id(self, grade_id, match):
"""Sets the level grade ``Id`` for this query.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_level_id_terms(self):
"""Clears all level ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
level_id_terms = property(fdel=clear_level_id_terms)
@abc.abstractmethod
def supports_level_query(self):
"""Tests if a ``GradeQuery`` is available.
:return: ``true`` if a grade query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_level_query(self):
"""Gets the query for a grade.
Multiple retrievals produce a nested ``OR`` term.
:return: the grade query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_level_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_level_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
level_query = property(fget=get_level_query)
@abc.abstractmethod
def match_any_level(self, match):
"""Matches an assessment offered that has any level assigned.
:param match: ``true`` to match offerings with any level, ``false`` to match offerings with no levsls
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_level_terms(self):
"""Clears all level terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
level_terms = property(fdel=clear_level_terms)
@abc.abstractmethod
def match_objective_bank_id(self, objective_bank_id, match):
"""Sets the objective bank ``Id`` for this query.
:param objective_bank_id: an objective bank ``Id``
:type objective_bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_bank_id_terms(self):
"""Clears the objective bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_bank_id_terms = property(fdel=clear_objective_bank_id_terms)
@abc.abstractmethod
def supports_objective_bank_query(self):
"""Tests if a ``ObjectiveBankQuery`` is available for querying resources.
:return: ``true`` if an objective bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_bank_query(self):
"""Gets the query for an objective bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective bank query
:rtype: ``osid.learning.ObjectiveBankQuery``
:raise: ``Unimplemented`` -- ``supports_objective_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankQuery
objective_bank_query = property(fget=get_objective_bank_query)
@abc.abstractmethod
def clear_objective_bank_terms(self):
"""Clears the objective bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_bank_terms = property(fdel=clear_objective_bank_terms)
@abc.abstractmethod
def get_proficiency_query_record(self, proficiency_record_type):
"""Gets the proficiency query record corresponding to the given ``Proficiency`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
        :param proficiency_record_type: a proficiency record type
        :type proficiency_record_type: ``osid.type.Type``
        :return: the proficiency query record
        :rtype: ``osid.learning.records.ProficiencyQueryRecord``
        :raise: ``NullArgument`` -- ``proficiency_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(proficiency_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ProficiencyQueryRecord
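# Illustrative sketch (not part of the OSID specification above): a possible
# ProficiencyQuery use, selecting one resource's proficiencies that are at
# least 75 percent complete. `session` and `resource_id` are hypothetical
# placeholders, and the percentage is arbitrary.
#
#     query = session.get_proficiency_query()
#     query.match_resource_id(resource_id, match=True)
#     query.match_minimum_completion(75.0, match=True)
#     proficiencies = session.get_proficiencies_by_query(query)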
class ObjectiveBankQuery:
"""This is the query for searching objective banks.
Each method specifies an ``AND`` term while multiple invocations of
the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_objective_id(self, objective_id, match):
"""Sets the objective ``Id`` for this query.
:param objective_id: an objective ``Id``
:type objective_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_id_terms(self):
"""Clears the objective ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_id_terms = property(fdel=clear_objective_id_terms)
@abc.abstractmethod
def supports_objective_query(self):
"""Tests if an ``ObjectiveQuery`` is available.
:return: ``true`` if an objective query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_query(self):
"""Gets the query for an objective.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective query
:rtype: ``osid.learning.ObjectiveQuery``
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuery
objective_query = property(fget=get_objective_query)
@abc.abstractmethod
def match_any_objective(self, match):
"""Matches an objective bank that has any objective assigned.
:param match: ``true`` to match objective banks with any objective, ``false`` to match objective banks with no objectives
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_objective_terms(self):
"""Clears the objective terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
objective_terms = property(fdel=clear_objective_terms)
@abc.abstractmethod
def match_activity_id(self, activity_id, match):
"""Sets the activity ``Id`` for this query.
:param activity_id: an activity ``Id``
:type activity_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``activity_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_activity_id_terms(self):
"""Clears the activity ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
activity_id_terms = property(fdel=clear_activity_id_terms)
@abc.abstractmethod
def supports_activity_query(self):
"""Tests if a ``ActivityQuery`` is available for querying activities.
:return: ``true`` if an activity query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_activity_query(self):
"""Gets the query for an activity.
Multiple retrievals produce a nested ``OR`` term.
:return: the activity query
:rtype: ``osid.learning.ActivityQuery``
:raise: ``Unimplemented`` -- ``supports_activity_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_query()`` is ``true``.*
"""
return # osid.learning.ActivityQuery
activity_query = property(fget=get_activity_query)
@abc.abstractmethod
def match_any_activity(self, match):
"""Matches an objective bank that has any activity assigned.
:param match: ``true`` to match objective banks with any activity, ``false`` to match objective banks with no activities
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_activity_terms(self):
"""Clears the activity terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
activity_terms = property(fdel=clear_activity_terms)
@abc.abstractmethod
def match_ancestor_objective_bank_id(self, objective_bank_id, match):
"""Sets the objective bank ``Id`` for this query to match objective banks that have the specified objective bank as an ancestor.
:param objective_bank_id: an objective bank ``Id``
:type objective_bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_objective_bank_id_terms(self):
"""Clears the ancestor objective bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_objective_bank_id_terms = property(fdel=clear_ancestor_objective_bank_id_terms)
@abc.abstractmethod
def supports_ancestor_objective_bank_query(self):
"""Tests if a ``ObjectiveBankQuery`` is available for querying ancestor objective banks.
:return: ``true`` if an objective bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_ancestor_objective_bank_query(self):
"""Gets the query for an objective bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective bank query
:rtype: ``osid.learning.ObjectiveBankQuery``
:raise: ``Unimplemented`` -- ``supports_ancestor_objective_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_ancestor_objective_bank_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankQuery
ancestor_objective_bank_query = property(fget=get_ancestor_objective_bank_query)
@abc.abstractmethod
def match_any_ancestor_objective_bank(self, match):
"""Matches an objective bank that has any ancestor.
:param match: ``true`` to match objective banks with any ancestor, ``false`` to match root objective banks
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_objective_bank_terms(self):
"""Clears the ancestor objective bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_objective_bank_terms = property(fdel=clear_ancestor_objective_bank_terms)
@abc.abstractmethod
def match_descendant_objective_bank_id(self, objective_bank_id, match):
"""Sets the objective bank ``Id`` for this query to match objective banks that have the specified objective bank as a descendant.
:param objective_bank_id: an objective bank ``Id``
:type objective_bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_objective_bank_id_terms(self):
"""Clears the descendant objective bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_objective_bank_id_terms = property(fdel=clear_descendant_objective_bank_id_terms)
@abc.abstractmethod
def supports_descendant_objective_bank_query(self):
"""Tests if a ``ObjectiveBankQuery`` is available for querying descendant objective banks.
:return: ``true`` if an objective bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_descendant_objective_bank_query(self):
"""Gets the query for an objective bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the objective bank query
:rtype: ``osid.learning.ObjectiveBankQuery``
:raise: ``Unimplemented`` -- ``supports_descendant_objective_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
        ``supports_descendant_objective_bank_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankQuery
descendant_objective_bank_query = property(fget=get_descendant_objective_bank_query)
@abc.abstractmethod
def match_any_descendant_objective_bank(self, match):
"""Matches an objective bank that has any descendant.
:param match: ``true`` to match objective banks with any descendant, ``false`` to match leaf objective banks
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_objective_bank_terms(self):
"""Clears the descendant objective bank terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_objective_bank_terms = property(fdel=clear_descendant_objective_bank_terms)
@abc.abstractmethod
def get_objective_bank_query_record(self, objective_bank_record_type):
"""Gets the objective bank query record corresponding to the given ``ObjectiveBank`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
:param objective_bank_record_type: an objective bank record type
:type objective_bank_record_type: ``osid.type.Type``
:return: the objective bank query record
:rtype: ``osid.learning.records.ObjectiveBankQueryRecord``
:raise: ``NullArgument`` -- ``objective_bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(objective_bank_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ObjectiveBankQueryRecord
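# Illustrative sketch (not part of the OSID specification above): a possible
# ObjectiveBankQuery use, walking the bank hierarchy to find banks below a
# given root that contain at least one activity. `session` and
# `district_bank_id` are hypothetical placeholders.
#
#     query = session.get_objective_bank_query()
#     query.match_ancestor_objective_bank_id(district_bank_id, match=True)
#     query.match_any_activity(True)
#     banks = session.get_objective_banks_by_query(query)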
| mit | -5,389,931,156,146,499,000 | 28.405288 | 137 | 0.626686 | false |
viable-hartman/DashboardDirector | fabric_bolt/core/waithelper.py | 1 | 2710 | #!/usr/bin/env python
"""
Wait for a certain PID to terminate and check for PID existance (POSIX).
"""
import os
import time
import errno
class TimeoutExpired(Exception):
pass
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError, e:
return e.errno == errno.EPERM
else:
return True
def wait_pid(pid, timeout=None):
"""Wait for process with pid 'pid' to terminate and return its
exit status code as an integer.
    If pid is not a child of os.getpid() (the current process), just
    waits until the process disappears and returns None.
    If pid does not exist at all, returns None immediately.
Raise TimeoutExpired on timeout expired (if specified).
"""
def check_timeout(delay):
if timeout is not None:
if time.time() >= stop_at:
raise TimeoutExpired
time.sleep(delay)
return min(delay * 2, 0.04)
if timeout is not None:
waitcall = lambda: os.waitpid(pid, os.WNOHANG)
stop_at = time.time() + timeout
else:
waitcall = lambda: os.waitpid(pid, 0)
delay = 0.0001
while 1:
try:
retpid, status = waitcall()
except OSError, err:
if err.errno == errno.EINTR:
delay = check_timeout(delay)
continue
elif err.errno == errno.ECHILD:
# This has two meanings:
# - pid is not a child of os.getpid() in which case
# we keep polling until it's gone
# - pid never existed in the first place
# In both cases we'll eventually return None as we
# can't determine its exit status code.
while 1:
if pid_exists(pid):
delay = check_timeout(delay)
else:
return
else:
raise
else:
if retpid == 0:
# WNOHANG was used, pid is still running
delay = check_timeout(delay)
continue
# process exited due to a signal; return the integer of
# that signal
if os.WIFSIGNALED(status):
return os.WTERMSIG(status)
# process exited using exit(2) system call; return the
# integer exit(2) system call has been called with
elif os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
# should never happen
raise RuntimeError("unknown process exit status")
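# Example usage sketch (assumes a POSIX system; the child process and timeout
# below are placeholders, not part of this module):
#
#     import subprocess
#     proc = subprocess.Popen(['sleep', '2'])
#     try:
#         status = wait_pid(proc.pid, timeout=5)  # 0 once the child exits normally
#     except TimeoutExpired:
#         status = None  # child still running after 5 seconds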
| mit | -3,769,212,887,919,976,000 | 29.795455 | 72 | 0.547232 | false |
AusTac/parma | b3/plugins/httpytail.py | 1 | 12820 | #
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2010 GrosBedo
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# CHANGELOG:
# 2010-09-04 - 0.1 - GrosBedo
# Initial release, with htaccess authentication support.
# 2011-03-17 - 0.2 - Courgette
# Make sure that maxGapBytes is never exceeded
# 2011-04-27 - 0.2.1 - 82ndab-Bravo17
# Auto assign of unique local games_mp log file
# 2011-05-26 - 0.2.2 - 82ndab-Bravo17
# * Append to local log implemented
# 22/05/2012 - 0.2.3 - Courgette
# * local_game_log config option can now use the @conf and @b3 shortcuts
# 19/02/2013 - 1.0 - Courgette
# * fix issue when public_ip and rcon_ip are different in b3.xml or when a domain name is used in place of an IP
__version__ = '1.0'
__author__ = 'GrosBedo, 82ndab-Bravo17, Courgette'
import b3, threading
from b3 import functions
import b3.events
import b3.plugin
import os.path
import time
import re
import sys
import urllib2, urllib
user_agent = "B3 Httpytail plugin/%s" % __version__
#--------------------------------------------------------------------------------------------------
class HttpytailPlugin(b3.plugin.Plugin):
### settings
_maxGap = 20480 # max gap in bytes between remote file and local file
    _waitBeforeReconnect = 15 # time (in sec) to wait before reconnecting after losing the HTTP connection
_connectionTimeout = 30
requiresConfigFile = False
httpconfig = None
buffer = None
_remoteFileOffset = None
_nbConsecutiveConnFailure = 0
_logAppend = False
_httpdelay = 0.150
def onStartup(self):
versionsearch = re.search("^((?P<mainversion>[0-9]).(?P<lowerversion>[0-9]+)?)", sys.version)
version = int(versionsearch.group(3))
if version < 6:
self.error('Python Version %s, this is not supported and may lead to hangs. Please update Python to 2.6' % versionsearch.group(1))
self.console.die()
if self.console.config.has_option('server', 'delay'):
self._httpdelay = self.console.config.getfloat('server', 'delay')
if self.console.config.has_option('server', 'local_game_log'):
self.lgame_log = self.console.config.getpath('server', 'local_game_log')
else:
self.lgame_log = os.path.normpath(os.path.expanduser(self.console.input.name))
self.debug('Local Game Log is %s' % self.lgame_log)
if self.console.config.get('server','game_log')[0:7] == 'http://' :
self.initThread(self.console.config.get('server','game_log'))
if self.console.config.has_option('server', 'log_append'):
self._logAppend = self.console.config.getboolean('server', 'log_append')
else:
self._logAppend = False
def onLoadConfig(self):
try:
self._connectionTimeout = self.config.getint('settings', 'timeout')
except:
self.warning("Error reading timeout from config file. Using default value")
self.info("HTTP connection timeout: %s" % self._connectionTimeout)
try:
self._maxGap = self.config.getint('settings', 'maxGapBytes')
except:
self.warning("Error reading maxGapBytes from config file. Using default value")
self.info("Maximum gap allowed between remote and local gamelog: %s bytes" % self._maxGap)
def initThread(self, httpfileDSN):
self.httpconfig = functions.splitDSN(httpfileDSN)
self.url = httpfileDSN
thread1 = threading.Thread(target=self.update)
self.info("Starting httpytail thread")
thread1.start()
class DiffURLOpener(urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
"""Create sub-class in order to overide error 206. This error means a
partial file is being sent,
which is ok in this case. Do nothing with this error.
"""
def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
pass
def update(self):
try:
self.file = open(self.lgame_log, 'ab')
self.file.write('\r\n')
self.file.write('B3 has been restarted\r\n')
self.file.write('\r\n')
self.file.close()
except Exception, e:
if hasattr(e, 'reason'):
self.error(str(e.reason))
if hasattr(e, 'code'):
self.error(str(e.code))
self.debug(str(e))
while self.console.working:
try:
# Opening the local temporary file
self.file = open(self.lgame_log, 'ab')
# Crafting the HTTP request
# - user agent header
headers = { 'User-Agent' : user_agent }
# - file url
if self.httpconfig['port']:
logurl = self.httpconfig['protocol']+'://'+self.httpconfig['host']+':'+self.httpconfig['port']+'/'+self.httpconfig['path']
else:
logurl = self.httpconfig['protocol']+'://'+self.httpconfig['host']+'/'+self.httpconfig['path']
req = urllib2.Request(logurl, None, headers)
# - htaccess authentication
# we login if the file is protected by a .htaccess and .htpasswd and the user specified a username and password in the b3 config (eg : http://user:password@host/path)
if self.httpconfig['user']:
username = self.httpconfig['user']
if self.httpconfig['password']:
password = self.httpconfig['password']
# create a password manager
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
# Add the username and password.
# If we knew the realm, we could use it instead of ``None``.
top_level_url = logurl
password_mgr.add_password(None, top_level_url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
# We store these parameters in an opener
opener = urllib2.build_opener(handler)
else:
# Else if no authentication is needed, then we create a standard opener
opener = urllib2.build_opener()
# Opening the full file and detect its size
webFile = opener.open(req)
urllib2.install_opener(opener)
filestats = webFile.info()
remoteSize = filestats.getheader('Content-Length')
webFile.close() # We close the remote connection as soon as possible to avoid spamming the server, and thus blacklisting us for an amount of time
# If we just started B3, we move the cursor to the current file size
if self._remoteFileOffset is None:
self._remoteFileOffset = remoteSize
# debug line
#self.debug('Diff - current cursor: %s - remote file size: %s' % (str(self._remoteFileOffset), str(remoteSize)) ) # please leave this debug line, it can be very useful for users to catch some weird things happening without errors, like if the webserver redirects the request because of too many connections (b3/delay is too short)
# Detecting log rotation if remote file size is lower than our current cursor position
if remoteSize < self._remoteFileOffset:
self.debug("remote file rotation detected")
self._remoteFileOffset = 0
# Fetching the diff of the remote file if our cursor is lower than the remote file size
if remoteSize > self._remoteFileOffset:
# For that, we use a custom made opener so that we can download only the diff between what has been added since last cycle
DiffURLOpener = self.DiffURLOpener()
httpopener = urllib2.build_opener(DiffURLOpener)
b1 = self._remoteFileOffset
b2 = remoteSize
if int(b2) - int(b1) > self._maxGap:
b1 = int(b2) - self._maxGap
# We add the Range header here, this is the one permitting to fetch only a part of an http remote file
range_bytes = "bytes=%s-%s" % (b1, b2)
self.verbose("requesting range %s" % range_bytes)
req.add_header("Range",range_bytes)
# Opening the section we want from the remote file
webFileDiff = httpopener.open(req)
# Adding the difference to our file (the file is cleaned at each startup by b3, in parser.py)
self.file.write(webFileDiff.read())
# We update the current cursor position to the size of the remote file
self._remoteFileOffset = remoteSize
self.verbose("%s bytes downloaded" % webFileDiff.info().getheader('Content-Length'))
# Finally, we close the distant file
webFileDiff.close()
# Closing the local temporary file
self.file.close()
except Exception, e:
if hasattr(e, 'reason'):
self.error(str(e.reason))
if hasattr(e, 'code'):
self.error(str(e.code))
self.debug(str(e))
except IOError, e:
if hasattr(e, 'reason'):
self.error('Failed to reach the server. Reason : %s' % str(e.reason))
if hasattr(e, 'code'):
self.error('The server could not fulfill the request. Error code : %s' % str(e.code))
self.debug(str(e))
self.file.close()
self.debug('http error: resetting local log file?')
if self._logAppend:
try:
self.file = open(self.lgame_log, 'ab')
self.file.write('\r\n')
self.file.write('B3 has restarted writing the log file\r\n')
self.file.write('\r\n')
except:
self.file = open(self.lgame_log, 'w')
else:
self.file = open(self.lgame_log, 'w')
self.file.close()
self.file = open(self.lgame_log, 'ab')
try:
self.webFile.close()
self.webFileDiff.close()
self.debug('HTTP Connection Closed')
except:
pass
webFile = None
if self._nbConsecutiveConnFailure <= 30:
time.sleep(1)
else:
self.debug('too many failures, sleeping %s sec' % self._waitBeforeReconnect)
time.sleep(self._waitBeforeReconnect)
time.sleep(self._httpdelay)
self.verbose("B3 is down, stopping Httpytail thread")
try:
webFile.close()
except:
pass
try:
self.file.close()
except:
pass
if __name__ == '__main__':
from b3.fake import fakeConsole
print "------------------------------------"
config = b3.config.XmlConfigParser()
config.setXml("""
<configuration plugin="httpytail">
<settings name="settings">
<set name="timeout">15</set>
<set name="maxGapBytes">1024</set>
</settings>
</configuration>
""")
p = HttpytailPlugin(fakeConsole, config)
p.onStartup()
p._httpdelay = 5
p.initThread('http://www.somewhere.tld/somepath/somefile.log')
time.sleep(300)
fakeConsole.shutdown()
time.sleep(8) | gpl-2.0 | -2,871,237,639,985,162,000 | 43.672474 | 346 | 0.559594 | false |
bootchk/resynthesizer | PluginScripts/plugin-uncrop.py | 1 | 6040 | #!/usr/bin/env python
'''
Gimp plugin "Uncrop"
Increase image/canvas size and synthesize outer band from edge of original.
Author:
lloyd konneker, lkk
Version:
1.0 lkk 5/15/2009 Initial version in scheme, released to Gimp Registry.
1.1 lkk 9/21/2009 Translate to python.
License:
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The GNU Public License is available at
http://www.gnu.org/copyleft/gpl.html
The effect for users:
widens the field of view, maintaining perspective of original
Should be undoable, except for loss of selection.
Should work on any image type, any count of layers and channels (although only active layer is affected.)
Programming notes:
Scheme uses - in names, python uses _
Programming devt. cycle:
Initial creation: cp foo.py ~/.gimp-2.6/scripts, chmod +x, start gimp
Refresh: just copy, no need to restart gimp if the pdb registration is unchanged
IN: Nothing special. The selection is immaterial but is not preserved.
OUT larger layer and image. All other layers not enlarged.
'''
from gimpfu import *
gettext.install("resynthesizer", gimp.locale_directory, unicode=True)
def resizeImageCentered(image, percentEnlarge):
# resize and center image by percent (converted to pixel units)
deltaFraction = (percentEnlarge / 100) + 1.0
priorWidth = pdb.gimp_image_width(image)
priorHeight = pdb.gimp_image_height(image)
deltaWidth = priorWidth * deltaFraction
deltaHeight = priorHeight * deltaFraction
centeredOffX = (deltaWidth - priorWidth)/ 2
centeredOffY = (deltaHeight - priorHeight) / 2
pdb.gimp_image_resize(image, deltaWidth, deltaHeight, centeredOffX, centeredOffY)
#if not pdb.gimp_image_resize(image, deltaWidth, deltaHeight, centeredOffX, centeredOffY):
# raise RuntimeError, "Failed resize"
def shrinkSelectionByPercent(image, percent):
# shrink selection by percent (converted to pixel units)
deltaFraction = percent / 100
# convert to pixel dimensions
priorWidth = pdb.gimp_image_width(image)
priorHeight = pdb.gimp_image_height(image)
deltaWidth = priorWidth * deltaFraction
deltaHeight = priorHeight * deltaFraction
# !!! Note total shrink percentage is halved (width of band is percentage/2)
maxDelta = max(deltaWidth, deltaHeight) / 2
pdb.gimp_selection_shrink(image, maxDelta)
#if not pdb.gimp_selection_shrink(image, maxDelta):
# raise RuntimeError, "Failed shrink selection"
def uncrop(orgImage, drawable, percentEnlargeParam=10):
'''
Create frisket stencil selection in a temp image to pass as source (corpus) to plugin resynthesizer,
which does the substantive work.
'''
if not pdb.gimp_item_is_layer(drawable):
pdb.gimp_message(_("A layer must be active, not a channel."))
return
pdb.gimp_image_undo_group_start(orgImage)
# copy original into temp for later use
tempImage = pdb.gimp_image_duplicate(orgImage)
if not tempImage:
raise RuntimeError, "Failed duplicate image"
'''
Prepare target: enlarge canvas and select the new, blank outer ring
'''
# Save original bounds to later select outer band
pdb.gimp_selection_all(orgImage)
selectAllPrior = pdb.gimp_selection_save(orgImage)
# Resize image alone doesn't resize layer, so resize layer also
resizeImageCentered(orgImage, percentEnlargeParam)
pdb.gimp_layer_resize_to_image_size(drawable)
pdb.gimp_image_select_item(orgImage, CHANNEL_OP_REPLACE, selectAllPrior)
# select outer band, the new blank canvas.
pdb.gimp_selection_invert(orgImage)
# Assert target image is ready.
'''
Prepare source (corpus) layer, a band at edge of original, in a dupe.
Note the width of corpus band is same as width of enlargement band.
'''
# Working with the original size.
# Could be alpha channel transparency
workLayer = pdb.gimp_image_get_active_layer(tempImage)
if not workLayer:
raise RuntimeError, "Failed get active layer"
# Select outer band: select all, shrink
pdb.gimp_selection_all(tempImage)
shrinkSelectionByPercent(tempImage, percentEnlargeParam)
pdb.gimp_selection_invert(tempImage) # invert interior selection into a frisket
# Note that v1 resynthesizer required an inverted selection !!
# No need to crop corpus to save memory.
# Note that the API hasn't changed but use_border param now has more values.
# !!! The crux: use_border param=5 means inside out direction
pdb.plug_in_resynthesizer(orgImage, drawable, 0,0,5, workLayer.ID, -1, -1, 0.0, 0.117, 16, 500)
# Clean up.
# Any errors now are moot.
pdb.gimp_selection_none(orgImage)
pdb.gimp_image_remove_channel(orgImage, selectAllPrior)
pdb.gimp_image_undo_group_end(orgImage)
pdb.gimp_displays_flush()
gimp.delete(tempImage) # Comment out to debug corpus creation.
register(
"python_fu_uncrop",
N_("Enlarge image by synthesizing a border that matches the edge, maintaining perspective. Works best for small enlargement of natural edges. Undo a Crop instead, if possible! "),
"Requires separate resynthesizer plugin.",
"Lloyd Konneker",
"Copyright 2009 Lloyd Konneker",
"2009",
N_("Uncrop..."),
"RGB*, GRAY*",
[
(PF_IMAGE, "image", "Input image", None),
(PF_DRAWABLE, "drawable", "Input drawable", None),
(PF_SLIDER, "percentEnlargeParam", _("Percent enlargement"), 10, (0, 100, 1))
],
[],
uncrop,
menu="<Image>/Filters/Enhance",
domain=("resynthesizer", gimp.locale_directory)
)
main()
| gpl-3.0 | -6,648,487,917,637,886,000 | 36.515528 | 182 | 0.716391 | false |
vineodd/PIMSim | GEM5Simulation/gem5/tests/testing/helpers.py | 3 | 6498 | #!/usr/bin/env python2
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from __future__ import print_function
import subprocess
from threading import Timer
import time
import re
class CallTimeoutException(Exception):
"""Exception that indicates that a process call timed out"""
def __init__(self, status, stdout, stderr):
self.status = status
self.stdout = stdout
self.stderr = stderr
class ProcessHelper(subprocess.Popen):
"""Helper class to run child processes.
This class wraps a subprocess.Popen class and adds support for
using it in a with block. When the process goes out of scope, it's
automatically terminated.
with ProcessHelper(["/bin/ls"], stdout=subprocess.PIPE) as p:
return p.call()
"""
def __init__(self, *args, **kwargs):
super(ProcessHelper, self).__init__(*args, **kwargs)
def _terminate_nicely(self, timeout=5):
def on_timeout():
self.kill()
if self.returncode is not None:
return self.returncode
timer = Timer(timeout, on_timeout)
self.terminate()
status = self.wait()
timer.cancel()
return status
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.returncode is None:
self._terminate_nicely()
def call(self, timeout=0):
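        """Run the child process to completion.

        Returns a (status, stdout, stderr) tuple. If `timeout` is non-zero
        and the process runs for longer than that many seconds, it is
        terminated and CallTimeoutException is raised instead.
        """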
self._timeout = False
def on_timeout():
self._timeout = True
self._terminate_nicely()
status, stdout, stderr = None, None, None
timer = Timer(timeout, on_timeout)
if timeout:
timer.start()
stdout, stderr = self.communicate()
status = self.wait()
timer.cancel()
if self._timeout:
self._terminate_nicely()
raise CallTimeoutException(self.returncode, stdout, stderr)
else:
return status, stdout, stderr
class FileIgnoreList(object):
"""Helper class to implement file ignore lists.
This class implements ignore lists using plain string matching and
regular expressions. In the simplest use case, rules are created
statically upon initialization:
    ignore_list = FileIgnoreList(names=("ignore_me.txt", ), rex=(r".*~", ))
Ignores can be queried using in the same ways as normal Python
containers:
if file_name in ignore_list:
print "Ignoring %s" % file_name
New rules can be added at runtime by extending the list in the
rules attribute:
ignore_list.rules.append(FileIgnoreList.simple("bar.txt"))
"""
@staticmethod
def simple(r):
return lambda f: f == r
@staticmethod
def rex(r):
re_obj = r if hasattr(r, "search") else re.compile(r)
return lambda name: re_obj.search(name)
def __init__(self, names=(), rex=()):
self.rules = [ FileIgnoreList.simple(n) for n in names ] + \
[ FileIgnoreList.rex(r) for r in rex ]
def __contains__(self, name):
for rule in self.rules:
if rule(name):
return True
return False
if __name__ == "__main__":
# Run internal self tests to ensure that the helpers are working
# properly. The expected output when running this script is
# "SUCCESS!".
cmd_foo = [ "/bin/echo", "-n", "foo" ]
cmd_sleep = [ "/bin/sleep", "10" ]
# Test that things don't break if the process hasn't been started
with ProcessHelper(cmd_foo) as p:
pass
with ProcessHelper(cmd_foo, stdout=subprocess.PIPE) as p:
status, stdout, stderr = p.call()
assert stdout == "foo"
assert status == 0
try:
with ProcessHelper(cmd_sleep) as p:
status, stdout, stderr = p.call(timeout=1)
assert False, "Timeout not triggered"
except CallTimeoutException:
pass
ignore_list = FileIgnoreList(
names=("ignore.txt", "foo/test.txt"),
rex=(r"~$", re.compile("^#")))
assert "ignore.txt" in ignore_list
assert "bar.txt" not in ignore_list
assert "foo/test.txt" in ignore_list
assert "test.txt" not in ignore_list
assert "file1.c~" in ignore_list
assert "file1.c" not in ignore_list
assert "#foo" in ignore_list
assert "foo#" not in ignore_list
ignore_list.rules.append(FileIgnoreList.simple("bar.txt"))
assert "bar.txt" in ignore_list
print("SUCCESS!")
| gpl-3.0 | -1,388,819,918,736,515,300 | 32.84375 | 77 | 0.671283 | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/patch_stdout.py | 1 | 4296 | """
patch_stdout
============
This implements a context manager that ensures that print statements within
it won't destroy the user interface. The context manager will replace
`sys.stdout` by something that draws the output above the current prompt,
rather than overwriting the UI.
Usage::
with patch_stdout():
...
application.run()
...
Multiple applications can run in the body of the context manager, one after the
other.
"""
from __future__ import unicode_literals
from .application import run_in_terminal
from .eventloop import get_event_loop
from contextlib import contextmanager
import threading
import sys
__all__ = [
'patch_stdout',
'StdoutProxy',
]
@contextmanager
def patch_stdout(raw=False):
"""
Replace `sys.stdout` by an :class:`_StdoutProxy` instance.
Writing to this proxy will make sure that the text appears above the
prompt, and that it doesn't destroy the output from the renderer. If no
    application is running, the behaviour should be identical to writing to
`sys.stdout` directly.
:param raw: (`bool`) When True, vt100 terminal escape sequences are not
removed/escaped.
"""
proxy = StdoutProxy(raw=raw)
original_stdout = sys.stdout
original_stderr = sys.stderr
# Enter.
sys.stdout = proxy
sys.stderr = proxy
try:
yield
finally:
# Exit.
proxy.flush()
sys.stdout = original_stdout
sys.stderr = original_stderr
class StdoutProxy(object):
"""
Proxy object for stdout which captures everything and prints output above
the current application.
"""
def __init__(self, raw=False, original_stdout=None):
assert isinstance(raw, bool)
original_stdout = original_stdout or sys.__stdout__
self.original_stdout = original_stdout
self._lock = threading.RLock()
self._raw = raw
self._buffer = []
# errors/encoding attribute for compatibility with sys.__stdout__.
self.errors = original_stdout.errors
self.encoding = original_stdout.encoding
def _write_and_flush(self, text):
"""
Write the given text to stdout and flush.
If an application is running, use `run_in_terminal`.
"""
if not text:
# Don't bother calling `run_in_terminal` when there is nothing to
# display.
return
def write_and_flush():
self.original_stdout.write(text)
self.original_stdout.flush()
def write_and_flush_in_loop():
# If an application is running, use `run_in_terminal`, otherwise
# call it directly.
run_in_terminal(write_and_flush, in_executor=False)
# Make sure `write_and_flush` is executed *in* the event loop, not in
# another thread.
get_event_loop().call_from_executor(write_and_flush_in_loop)
def _write(self, data):
"""
        Note: print()-statements cause multiple write calls.
(write('line') and write('\n')). Of course we don't want to call
`run_in_terminal` for every individual call, because that's too
expensive, and as long as the newline hasn't been written, the
text itself is again overwritten by the rendering of the input
        command line. Therefore, we have a little buffer which holds the
text until a newline is written to stdout.
"""
if '\n' in data:
# When there is a newline in the data, write everything before the
# newline, including the newline itself.
before, after = data.rsplit('\n', 1)
to_write = self._buffer + [before, '\n']
self._buffer = [after]
text = ''.join(to_write)
self._write_and_flush(text)
else:
# Otherwise, cache in buffer.
self._buffer.append(data)
def _flush(self):
text = ''.join(self._buffer)
self._buffer = []
self._write_and_flush(text)
def write(self, data):
with self._lock:
self._write(data)
def flush(self):
"""
Flush buffered output.
"""
with self._lock:
self._flush()
| mit | -6,573,644,135,968,771,000 | 28.627586 | 79 | 0.607309 | false |
lqhuang/SAXS-tools | RAW/RAWAnalysisWrapper.py | 1 | 19413 | from __future__ import print_function, division
import os
import sys
import re
import copy
import platform
import subprocess
from scipy import polyval, polyfit
import numpy as np
RAW_DIR = os.path.dirname(os.path.abspath(__file__))
if RAW_DIR not in sys.path:
sys.path.append(RAW_DIR)
import SASM
import SASFileIO
import SASExceptions
import SASCalc
from RAWUtils import ErrorPrinter
class GuinierAnalyzer():
"""Wrapper for GuinierControlPanel"""
def __init__(self, raw_settings, stdout):
self.raw_settings = raw_settings
self._stdout = stdout
self.curr_sasm = None
self.old_analysis = None
self.spinctrlIDs = {
'qstart': 0,
'qend': 0,
}
self.infodata = {
'I0': (0, 0),
'Rg': (0, 0),
'qRg_max': 0,
'qRg_min': 0,
'rsq': 0,
}
def analyse(self, sasm):
"""
sasm: (not list) this will modify input sasm object
"""
self.curr_sasm = sasm
self._runAutoRg()
self._calcFit()
self._saveInfo()
def _runAutoRg(self):
rg, rger, i0, i0er, idx_min, idx_max = SASCalc.autoRg(self.curr_sasm)
self.spinctrlIDs['rg'] = rg
self.spinctrlIDs['rger'] = rger
self.spinctrlIDs['i0'] = i0
self.spinctrlIDs['i0er'] = i0er
if rg == -1:
print(
'AutoRG Failed:',
'AutoRG could not find a suitable interval to calculate Rg.',
file=self._stdout)
else:
try:
self.curr_sasm.q[int(idx_min)]
self.curr_sasm.q[int(idx_max)]
self.spinctrlIDs['qstart'] = int(idx_min)
self.spinctrlIDs['qend'] = int(idx_max)
except IndexError:
print(
'AutoRG Failed:',
'AutoRG did not produce a useable result. Please report this to the developers.',
file=self._stdout)
raise IndexError()
def _calcFit(self):
""" calculate fit and statistics """
qstart = self.spinctrlIDs['qstart']
qend = self.spinctrlIDs['qend']
q_roi = self.curr_sasm.q[qstart:qend]
i_roi = self.curr_sasm.i[qstart:qend]
err_roi = self.curr_sasm.err[qstart:qend]
x = np.power(q_roi, 2)
y = np.log(i_roi)
err = y * np.absolute(err_roi / i_roi)
#Remove NaN and Inf values:
x = x[np.where(np.isnan(y) == False)]
err = err[np.where(np.isnan(y) == False)]
y = y[np.where(np.isnan(y) == False)]
x = x[np.where(np.isinf(y) == False)]
err = err[np.where(np.isinf(y) == False)]
y = y[np.where(np.isinf(y) == False)]
#Get 1.st order fit:
ar, br = polyfit(x, y, 1)
#This uses error weighted points to calculate the Rg. Probably the correct way to do it, but different
#from how it has always been done.
# f = lambda x, a, b: a+b*x
# opt, cov = scipy.optimize.curve_fit(f, x, y, sigma = err, absolute_sigma = True)
# ar = opt[1]
# br = opt[0]
#Obtain fit values:
y_fit = polyval([ar, br], x)
#Get fit statistics:
error = y - y_fit
SS_tot = np.sum(np.power(y - np.mean(y), 2))
SS_err = np.sum(np.power(error, 2))
rsq = 1 - SS_err / SS_tot
I0 = br
Rg = np.sqrt(-3 * ar)
if np.isnan(Rg):
Rg = 0
######## CALCULATE ERROR ON PARAMETERS ###############
N = len(error)
stde = SS_err / (N - 2)
std_slope = stde * np.sqrt((1 / N) + (
np.power(np.mean(x), 2) / np.sum(np.power(x - np.mean(x), 2))))
std_interc = stde * np.sqrt(1 / np.sum(np.power(x - np.mean(x), 2)))
######################################################
if np.isnan(std_slope):
std_slope = -1
if np.isnan(std_interc):
std_interc = -1
newInfo = {
'I0': (np.exp(I0), std_interc),
'Rg': (Rg, std_slope),
'qRg_max': Rg * np.sqrt(x[-1]),
'qRg_min': Rg * np.sqrt(x[0]),
'rsq': rsq
}
return x, y_fit, br, error, newInfo
def _saveInfo(self):
x_fit, y_fit, I0, error, newInfo = self._calcFit()
for key, value in newInfo.items():
self.infodata[key] = value
info_dict = copy.deepcopy(self.infodata)
qstart_val = self.spinctrlIDs['qstart']
qend_val = self.spinctrlIDs['qend']
info_dict['qStart'] = qstart_val
info_dict['qEnd'] = qend_val
analysis_dict = self.curr_sasm.getParameter('analysis')
analysis_dict['guinier'] = info_dict
class GNOMAnalyzer():
"""Wrapper for GNOMControlPanel """
def __init__(self, raw_settings, stdout=None):
self.raw_settings = raw_settings
if stdout is None:
self._stdout = sys.stdout
else:
self._stdout = stdout
self.gnom_settings = {
'expert': self.raw_settings.get('gnomExpertFile'),
'rmin_zero': self.raw_settings.get('gnomForceRminZero'),
'rmax_zero': self.raw_settings.get('gnomForceRmaxZero'),
'npts': self.raw_settings.get('gnomNPoints'),
'alpha': self.raw_settings.get('gnomInitialAlpha'),
'angular': self.raw_settings.get('gnomAngularScale'),
'system': self.raw_settings.get('gnomSystem'),
'form': self.raw_settings.get('gnomFormFactor'),
'radius56': self.raw_settings.get('gnomRadius56'),
'rmin': self.raw_settings.get('gnomRmin'),
'fwhm': self.raw_settings.get('gnomFWHM'),
'ah': self.raw_settings.get('gnomAH'),
'lh': self.raw_settings.get('gnomLH'),
'aw': self.raw_settings.get('gnomAW'),
'lw': self.raw_settings.get('gnomLW'),
'spot': self.raw_settings.get('gnomSpot'),
'expt': self.raw_settings.get('gnomExpt'),
}
# self.out_list = {}
self.curr_sasm = None
self.curr_iftm = None
self.spinctrlIDs = {
'qstart': 0,
'qend': 0,
'dmax': 0,
}
self.infodata = {
'guinierI0': (0, 0),
'guinierRg': (0, 0),
'gnomI0': (0, 0),
'gnomRg': (0, 0),
'TE': 0, # 'Total Estimate'
'gnomQuality': 0,
'chisq': 0,
}
self._getGnomVersion()
def _getGnomVersion(self):
"""Checks if we have gnom4 or gnom5"""
atsasDir = self.raw_settings.get('ATSASDir')
opsys = platform.system()
if opsys == 'Windows':
dammifDir = os.path.join(atsasDir, 'dammif.exe')
else:
dammifDir = os.path.join(atsasDir, 'dammif')
if os.path.exists(dammifDir):
process = subprocess.Popen(
'%s -v' % (dammifDir),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
) #gnom4 doesn't do a proper -v!!! So use something else
output, error = process.communicate()
output = output.strip().decode('utf-8')
error = error.strip().decode('utf-8')
dammif_re = 'ATSAS\s*\d+[.]\d+[.]\d*'
version_match = re.search(dammif_re, output)
version = version_match.group().split()[-1]
if int(version.split('.')[0]) > 2 or (
int(version.split('.')[0]) == 2
and int(version.split('.')[1]) >= 8):
self.new_gnom = True
else:
self.new_gnom = False
def analyse(self, sasm):
self.curr_sasm = sasm
analysis_dict = sasm.getParameter('analysis')
if 'guinier' in analysis_dict:
self.spinctrlIDs['qstart'] = analysis_dict['guinier']['qStart']
self.spinctrlIDs['qend'] = analysis_dict['guinier']['qEnd']
self.curr_iftm = self._initGNOM(sasm)
self._saveInfo()
def _initGNOM(self, sasm):
analysis_dict = sasm.getParameter('analysis')
if 'GNOM' in analysis_dict:
iftm = self._initGnomValues(sasm)
            assert False  # handling of pre-existing GNOM results is not implemented in this wrapper
else:
path = self.raw_settings.get('GnomFilePath') # TODO: temp files?
cwd = os.getcwd()
savename = 't_dat.dat'
while os.path.isfile(os.path.join(path, savename)):
savename = 't' + savename
save_sasm = SASM.SASM(
copy.deepcopy(sasm.i), copy.deepcopy(sasm.q),
copy.deepcopy(sasm.err),
copy.deepcopy(sasm.getAllParameters()))
save_sasm.setParameter('filename', savename)
save_sasm.setQrange(sasm.getQrange())
try:
SASFileIO.saveMeasurement(
save_sasm, path, self.raw_settings, filetype='.dat')
except SASExceptions.HeaderSaveError as error:
printer = ErrorPrinter(self.raw_settings, self._stdout)
printer.showSaveError('header')
os.chdir(path)
try:
init_iftm = SASCalc.runDatgnom(savename, sasm,
self.raw_settings)
except SASExceptions.NoATSASError as error:
print(
'Error running GNOM/DATGNOM:',
str(error),
file=self._stdout)
self.cleanupGNOM(path, savename=savename)
os.chdir(cwd)
return None
os.chdir(cwd)
if init_iftm is None:
outname = 't_datgnom.out'
while os.path.isfile(outname):
outname = 't' + outname
if 'guinier' in analysis_dict:
rg = float(analysis_dict['guinier']['Rg'][0]) # TODO: [0]?
dmax = int(rg * 3.) #Mostly arbitrary guess at Dmax
print("????:", rg)
else:
print(
'No DMAX found warning:',
                        'No Guinier analysis found, an arbitrary value of 80 will be used for DMAX.',
file=self._stdout)
dmax = 80 #Completely arbitrary default setting for Dmax
os.chdir(path)
try:
init_iftm = SASCalc.runGnom(
savename,
outname,
dmax,
self.gnom_settings,
new_gnom=self.new_gnom,
raw_settings=self.raw_settings)
except SASExceptions.NoATSASError as error:
print(
'Error running GNOM/DATGNOM',
str(error),
file=self._stdout)
self.cleanupGNOM(path, savename=savename, outname=outname)
os.chdir(cwd)
return None
os.chdir(cwd)
self.cleanupGNOM(path, outname=outname)
self.cleanupGNOM(path, savename=savename)
iftm = self._initDatgnomValues(sasm, init_iftm)
# plotPanel.plotPr(iftm)
return iftm
def _initGnomValues(self, sasm):
dmax = sasm.getParameter('analysis')['GNOM']['Dmax']
iftm = self._calcGNOM(dmax)
return iftm
def _initDatgnomValues(self, sasm, iftm):
dmax = int(round(iftm.getParameter('dmax')))
if dmax != iftm.getParameter('dmax'):
iftm = self._calcGNOM(dmax)
return iftm
def _calcGNOM(self, dmax):
start = int(self.spinctrlIDs['qstart'])
end = int(self.spinctrlIDs['qend'])
self.gnom_settings['npts'] = 0
path = self.raw_settings.get('GnomFilePath') # TODO: temp path
cwd = os.getcwd()
savename = 't_dat.dat'
while os.path.isfile(os.path.join(path, savename)):
savename = 't' + savename
outname = 't_out.out'
while os.path.isfile(os.path.join(path, outname)):
outname = 't' + outname
save_sasm = SASM.SASM(
copy.deepcopy(self.curr_sasm.i), copy.deepcopy(self.curr_sasm.q),
copy.deepcopy(self.curr_sasm.err),
copy.deepcopy(self.curr_sasm.getAllParameters()))
save_sasm.setParameter('filename', savename)
save_sasm.setQrange((start, end))
try:
SASFileIO.saveMeasurement(
save_sasm, path, self.raw_settings, filetype='.dat')
except SASExceptions.HeaderSaveError as error:
printer = ErrorPrinter(self.raw_settings, self._stdout)
printer.showSaveError('header')
os.chdir(path)
try:
iftm = SASCalc.runGnom(
savename,
outname,
dmax,
self.gnom_settings,
new_gnom=self.new_gnom,
raw_settings=self.raw_settings)
except SASExceptions.NoATSASError as error:
print('Error running GNOM/DATGNOM:', str(error), file=self._stdout)
self.cleanupGNOM(path, savename, outname)
os.chdir(cwd)
return None
os.chdir(cwd)
# self.cleanupGNOM(path, savename, outname)
return iftm
def cleanupGNOM(self, path, savename='', outname=''):
savefile = os.path.join(path, savename)
outfile = os.path.join(path, outname)
if savename != '':
if os.path.isfile(savefile):
try:
os.remove(savefile)
except Exception as error:
print(
error,
'GNOM cleanup failed to remove the .dat file!',
file=self._stdout)
if outname != '':
if os.path.isfile(outfile):
try:
os.remove(outfile)
except Exception as error:
print(
error,
'GNOM cleanup failed to remove the .out file!',
file=self._stdout)
def _saveInfo(self):
gnom_results = {}
dmax = int(round(self.curr_iftm.getParameter('dmax')))
start_idx = self.spinctrlIDs['qstart']
end_idx = self.spinctrlIDs['qend']
gnom_results['Dmax'] = dmax
gnom_results['Total_Estimate'] = self.curr_iftm.getParameter('TE')
gnom_results['Real_Space_Rg'] = self.curr_iftm.getParameter('rg')
gnom_results['Real_Space_I0'] = self.curr_iftm.getParameter('i0')
gnom_results['qStart'] = self.curr_sasm.q[start_idx]
gnom_results['qEnd'] = self.curr_sasm.q[end_idx]
# gnom_results['GNOM_ChiSquared'] = self.curr_iftm['chisq']
# gnom_results['GNOM_Quality_Assessment'] = self.curr_iftm['gnomQuality']
analysis_dict = self.curr_sasm.getParameter('analysis')
analysis_dict['GNOM'] = gnom_results
iftm = self.curr_iftm
iftm.setParameter(
'filename',
os.path.splitext(self.curr_sasm.getParameter('filename'))[0] +
'.out')
if self.raw_settings.get('AutoSaveOnGnom'):
if os.path.isdir(self.raw_settings.get('GnomFilePath')):
self.saveIFTM(iftm, self.raw_settings.get('GnomFilePath'))
else:
                gnom_path = self.raw_settings.get('GnomFilePath')
                self.raw_settings.set('GnomFilePath', False)
                print(
                    'Autosave Error:',
                    'The folder:\n' + str(gnom_path) +
                    '\ncould not be found. Autosave of GNOM files has been disabled. If you are using a config file from a different computer please go into Advanced Options/Autosave to change the save folders, or save your config file to avoid this message next time.',
                    file=self._stdout)
def saveIFTM(self, iftm, save_path):
"""Save IFTM object to file."""
if iftm.getParameter('algorithm') == 'GNOM':
newext = '.out'
else:
newext = '.ift'
filename = iftm.getParameter('filename')
check_filename, ext = os.path.splitext(filename)
check_filename = check_filename + newext
filepath = os.path.join(save_path, check_filename)
# file_exists = os.path.isfile(filepath)
filepath = save_path
try:
SASFileIO.saveMeasurement(
iftm, filepath, self.raw_settings, filetype=newext)
except SASExceptions.HeaderSaveError:
printer = ErrorPrinter(self.raw_settings, self._stdout)
printer.showSaveError('header')
class MolecularWeightAnalyzer():
"""Wrapper for MolWeightFrame"""
def __init__(self, raw_settings):
self.raw_settings = raw_settings
self.infodata = {
'I0': ('I0 :', 0, 0),
'Rg': ('Rg :', 0, 0),
}
def analyse(self, sasm):
self.curr_sasm = sasm
if 'molecularWeight' in self.curr_sasm.getParameter('analysis'):
self.old_analysis = copy.deepcopy(
self.curr_sasm.getParameter('analysis')['molecularWeight'])
def updateGuinierInfo(self):
pass
class BIFTAnalyzer():
"""Wrapper for BIFTControlPanel"""
def __init__(self, raw_settings):
self.raw_settings = raw_settings
self.bift_settings = (self.raw_settings.get('PrPoints'),
self.raw_settings.get('maxAlpha'),
self.raw_settings.get('minAlpha'),
self.raw_settings.get('AlphaPoints'),
self.raw_settings.get('maxDmax'),
self.raw_settings.get('minDmax'),
self.raw_settings.get('DmaxPoints'))
self.infodata = {
'dmax': ('Dmax :', 0),
'alpha': ('Alpha :', 0),
'guinierI0': ('I0 :', 0),
'guinierRg': ('Rg :', 0),
'biftI0': ('I0 :', 0),
'biftRg': ('Rg :', 0),
'chisq': ('chi^2 (fit) :', 0),
}
self.iftm = None
def analyse(self, sasm):
self.curr_sasm = sasm
if 'BIFT' in self.curr_sasm.getParameter('analysis'):
self.old_analysis = copy.deepcopy(
self.curr_sasm.getParameter('analysis')['BIFT'])
class RAWAnalysisSimulator():
"""RAW Data Analysis"""
ANALYSIS = {
'guinier': GuinierAnalyzer,
'GNOM': GNOMAnalyzer,
'molecularWeight': None,
'BIFT': None,
}
def __init__(self, raw_settings, stdout=None):
self.raw_settings = raw_settings
if stdout is None:
self._stdout = sys.stdout
else:
self._stdout = stdout
self._analyzer = dict()
for key, analyzer_cls in self.ANALYSIS.items():
if analyzer_cls is not None:
self._analyzer[key] = analyzer_cls(self.raw_settings,
self._stdout)
def analyse(self, sasm):
self._analyzer['guinier'].analyse(sasm)
self._analyzer['GNOM'].analyse(sasm)
| gpl-3.0 | 9,173,039,079,503,906,000 | 32.703125 | 269 | 0.515273 | false |
thomasvdv/flightbit | analysis/thermal_stats.py | 1 | 9061 | import numpy as np
import pandas as pd
import sys
import gdal
import math
from gdalconst import GA_ReadOnly
from skewt import SkewT
import os
import matplotlib.pyplot as plt
from os.path import expanduser
home = expanduser("~")
thermal_file = home + "/RAP/CSV/{}.csv"
def open_raster(name):
"""
This functions opens the raster file for processing
"""
try:
raster = gdal.Open(name, GA_ReadOnly)
except RuntimeError, exception:
print 'Unable to open ' + name
print exception
sys.exit(1)
return raster
def retrieve_band(lat, lon):
"""
    This function takes the given coordinates and returns the
    elevation (band) value. NOTE: this only takes in Mercator values; it
    does not work with WGS84.
    lat - the latitude (y axis) of the point the user defined
    lon - the longitude (x axis) of the point the user defined
"""
if -125.0 < lon < -115.0 and 50.0 > lat > 45.0:
name = 'SRTM/srtm_13_03.tif'
if -125 < lon < -120:
name = 'SRTM/srtm_12_03.tif'
print 'Using {} for {} {}'.format(name, lat, lon)
raster = open_raster(name)
transform = raster.GetGeoTransform()
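        # GDAL geotransform layout: transform[0]/transform[3] hold the
        # top-left corner longitude/latitude and transform[1]/transform[5]
        # the pixel width/height, so the divisions below convert lon/lat
        # into raster column/row offsets.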
x_offset = int((lon - transform[0]) / transform[1])
y_offset = int((lat - transform[3]) / transform[5])
band = raster.GetRasterBand(1)
data = band.ReadAsArray(x_offset, y_offset, 1, 1)
return data[0][0]
else:
print "Thermal out of bound: {} {}".format(lat, lon)
return -1
# Dewpoint calculation adapted from ...
def dew_point(df_snd):
df_snd['DPT_B'] = df_snd.TMP_C.apply(lambda x: 17.368 if x > 0 else 17.966)
df_snd['DPT_C'] = df_snd.TMP_C.apply(lambda x: 238.88 if x > 0 else 247.15)
pa = df_snd.RH / 100. * np.exp(df_snd.DPT_B * df_snd.TMP_C / (df_snd.DPT_C + df_snd.TMP_C))
df_snd['DEWP_C'] = df_snd.DPT_C * np.log(pa) / (df_snd.DPT_B - np.log(pa))
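# --- Illustrative sketch (not from the original script) ---
# Scalar version of the Magnus-type dewpoint computed above, for a single
# temperature / relative-humidity pair; the call at the end is only an
# example value.
#
#     import math
#
#     def dew_point_scalar(tmp_c, rh):
#         b, c = (17.368, 238.88) if tmp_c > 0 else (17.966, 247.15)
#         gamma = math.log(rh / 100.0) + b * tmp_c / (c + tmp_c)
#         return c * gamma / (b - gamma)
#
#     dew_point_scalar(20.0, 50.0)   # ~9.3 degrees C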
def calc_hgt(df_snd, p):
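    """Linearly interpolate the geopotential height (m) of pressure level p
    (hPa) between the two sounding levels that bracket it."""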
upper_hgt, upper_level = df_snd.loc[df_snd['level'] <= p].iloc[0][['HGT', 'level']]
lower_hgt, lower_level = df_snd.loc[df_snd['level'] >= p].iloc[-1][['HGT', 'level']]
lvls = range(int(upper_level), int(lower_level) + 1)
hghts = np.empty(len(lvls))
hghts[:] = np.NAN
hghts[0] = upper_hgt
hghts[-1] = lower_hgt
df_hght = pd.DataFrame({'level': lvls, 'HGT': hghts}).interpolate()
hgt, level = df_hght.loc[df_hght['level'] == int(p)].iloc[0][['HGT', 'level']]
return hgt
def get_parcel_at_hgt(terrain, df_snd):
print 'Generating parcel at {}'.format(terrain)
upper_hgt, upper_level, upper_tmp_c, upper_dewp_c, upper_w_dir, upper_w_spd_kts = df_snd.loc[df_snd['HGT'] >= terrain].iloc[0][
['HGT', 'level', 'TMP_C', 'DEWP_C','W_DIR','W_SPD_KTS']]
df_lwr = df_snd.loc[df_snd['HGT'] <= terrain]
if len(df_lwr.index > 0):
lower_hgt, lower_level, lower_tmp_c, lower_dewp_c, lower_w_dir, lower_w_spd_kts = df_lwr.iloc[-1][
['HGT', 'level', 'TMP_C', 'DEWP_C','W_DIR','W_SPD_KTS']]
hgts = range(int(lower_hgt), int(upper_hgt) + 1)
interp = np.empty(len(hgts))
interp[:] = np.NAN
levels = list(interp)
levels[0] = lower_level
levels[-1] = upper_level
temps = list(interp)
temps[0] = lower_tmp_c
temps[-1] = upper_tmp_c
dewpts = list(interp)
dewpts[0] = lower_dewp_c
dewpts[-1] = upper_dewp_c
wdirs = list(interp)
wdirs[0] = lower_w_dir
wdirs[-1] = upper_w_dir
wkts = list(interp)
wkts[0] = lower_w_spd_kts
wkts[-1] = upper_w_spd_kts
df_interp = pd.DataFrame({'HGT': hgts, 'level': levels, 'TMP_C': temps, 'DEWP_C': dewpts, 'W_DIR': wdirs, 'W_SPD_KTS': wkts}).interpolate()
hgt, level, tmp_c, dewp_c, w_dir, w_spd_kts = df_interp.loc[df_interp['HGT'] == int(terrain)].iloc[0][
['HGT', 'level', 'TMP_C', 'DEWP_C','W_DIR','W_SPD_KTS']]
return (level, tmp_c, dewp_c, 'interp', hgt, w_dir, w_spd_kts)
else:
return (upper_level, upper_tmp_c, upper_dewp_c, 'lowest', upper_hgt, upper_w_dir, upper_w_spd_kts)
def strip_to_terrain(df_snd, parcel):
level = parcel[0]
# Reduce the sounding to terrain height.
return df_snd.loc[df_snd['level'] <= level].reset_index(drop=True)
def process_thermal_wx(thermal):
print 'Calculating WX for {}'.format(thermal)
lon = thermal.longitude
lat = thermal.latitude
terrain = retrieve_band(lat, lon)
if terrain == -1:
return
df = pd.read_csv(thermal_file.format(thermal.thermal_id))
if len(df.index) < 185:
df.to_csv(home + "/RAP/CSV/{}.error".format(thermal.thermal_id))
return
df['paramId'] = pd.to_numeric(df.paramId, errors='coerce')
df['value'] = pd.to_numeric(df.value, errors='coerce')
df['level'] = pd.to_numeric(df.level, errors='coerce')
# Geopotential Height
df_hgt = df.loc[df['paramId'] == 156][0:37]
df_hgt = df_hgt.rename(columns={'value': 'HGT'}).drop('paramId', 1)
# Temperature
df_tmp = df.loc[df['paramId'] == 130][0:37]
df_tmp = df_tmp.rename(columns={'value': 'TMP_K'}).drop('paramId', 1)
# Relative Humidity
df_rh = df.loc[df['paramId'] == 157][0:37]
df_rh = df_rh.rename(columns={'value': 'RH'}).drop('paramId', 1)
# U component of wind
df_uw = df.loc[df['paramId'] == 131][0:37]
df_uw = df_uw.rename(columns={'value': 'W_U'}).drop('paramId', 1)
    # V component of wind
df_vw = df.loc[df['paramId'] == 132][0:37]
df_vw = df_vw.rename(columns={'value': 'W_V'}).drop('paramId', 1)
# Ground Temperature
# df_gtmp = df.loc[df['paramId'] == 167]
dfs = [df_hgt, df_tmp, df_rh, df_uw, df_vw]
df_snd = reduce(lambda left, right: pd.merge(left, right, on='level'), dfs)
# Wind Speed
df_snd['W_SPD_MS'] = (df_snd.W_U ** 2 + df_snd.W_V ** 2) ** (0.5)
df_snd['W_SPD_KTS'] = df_snd.W_SPD_MS * 1.94384
# Wind Direction
df_snd['W_DIR'] = np.arctan2(df_snd.W_U, df_snd.W_V) * (180. / np.pi)
# Temperature in Celcius
df_snd['TMP_C'] = df_snd.TMP_K - 273.15
# Dewpoint Temperature
dew_point(df_snd)
# Get the lift parcel for the terrain altitude
parcel = get_parcel_at_hgt(terrain, df_snd)
df_snd = strip_to_terrain(df_snd, parcel)
# Retrieve surface temperature
print parcel
base_tmp = parcel[1]
base_hgt = parcel[4]
thermal['ground_temp_c'] = base_tmp
thermal['ground_elev'] = base_hgt
thermal['ground_w_dir'] = parcel[5]
thermal['ground_w_spd_kts'] = parcel[6]
    # Add the DALR: the dry adiabatic lapse rate profile, a surface parcel cooling 9.8 C per km above the launch elevation
df_snd['DALR'] = base_tmp - ((df_snd.HGT - base_hgt) / 1000) * 9.8
    # Virtual Temperature: Tv = T / (1 - 0.379 * e / p), with the vapour
    # pressure e (hPa) from the Tetens formula e = 6.11 * 10**(7.5*Td/(237.7+Td))
    df_snd['VIRTT'] = (df_snd.TMP_K) / (
        1 - 0.379 * (6.11 * np.power(10, (7.5 * df_snd.DEWP_C) / (237.7 + df_snd.DEWP_C))) / df_snd.level) - 273.15
    # Thermal Index: environment temperature minus the DALR parcel temperature (TI <= 0 means the parcel is still buoyant)
df_snd['TI'] = df_snd.TMP_C - df_snd.DALR
df_snd['TI_ROUND'] = df_snd['TI'].round()
# Top of lift
lift_top = np.NAN
df_lift = df_snd.loc[df_snd['TI_ROUND'] <= 0]
if len(df_lift.index > 0) :
lift_top = df_lift.iloc[-1]['HGT']
thermal['lift_top'] = lift_top
hght = df_snd[['HGT']].as_matrix().flatten()
pres = df_snd[['level']].as_matrix().flatten()
temp = df_snd[['TMP_C']].as_matrix().flatten()
dwpt = df_snd[['DEWP_C']].as_matrix().flatten()
    sknt = df_snd[['W_SPD_KTS']].as_matrix().flatten()
    drct = df_snd[['W_DIR']].as_matrix().flatten()
mydata = dict(zip(('hght', 'pres', 'temp', 'dwpt', 'sknt', 'drct'), (hght, pres, temp, dwpt, sknt, drct)))
S = SkewT.Sounding(soundingdata=mydata)
S.make_skewt_axes();
S.add_profile();
S.lift_parcel(*parcel[0:4])
Plcl, Plfc, P_el, CAPE, CIN = S.get_cape(*parcel[0:4])
# S.plot_skewt(title=thermal.time)
plt.title('Test')
plt.savefig(home + "/RAP/PNG/{}.png".format(thermal.thermal_id))
Hlcl = calc_hgt(df_snd, Plcl)
thermal['H_lcl'] = Hlcl
Hlfc = Plfc
if not (math.isnan(Plfc)):
Hlfc = calc_hgt(df_snd, Plfc)
thermal['H_lfc'] = Hlfc
H_el = P_el
if not (math.isnan(P_el)):
H_el = calc_hgt(df_snd, P_el)
thermal['H_el'] = H_el
thermal['CAPE'] = CAPE
thermal['CIN'] = CIN
return thermal
if __name__ == '__main__':
# Find all thermals in the thermals directory.
# Process each one and add the result to the WX folder
output = home + "/OLC/CSV/thermals_wx.csv"
thermal_idx = home + "/OLC/CSV/thermals.csv"
df_thermals = pd.read_csv(thermal_idx)
df_thermals_wx = pd.DataFrame()
for idx, thermal in df_thermals.iterrows():
thermal_id = thermal.thermal_id
if os.path.isfile(home + "/RAP/CSV/{}.csv".format(thermal_id)):
print 'Start processing thermal {}'.format(thermal_id)
thermal = process_thermal_wx(thermal)
print thermal
df_thermals_wx = df_thermals_wx.append(thermal)
else:
print 'Skipping thermal {}'.format(thermal_id)
df_thermals_wx.to_csv(output, index=False) | gpl-2.0 | 8,520,788,158,754,758,000 | 33.196226 | 147 | 0.585587 | false |
suutari/shoop | shuup/testing/admin_module/toolbar.py | 1 | 1529 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shuup.admin.toolbar import DropdownItem, URLActionButton
class MockContactToolbarButton(URLActionButton):
def __init__(self, contact, **kwargs):
kwargs["icon"] = "fa fa-user"
kwargs["text"] = _("Hello") + contact.full_name
kwargs["extra_css_class"] = "btn-info"
kwargs["url"] = "/#mocktoolbarbutton"
self.contact = contact
super(MockContactToolbarButton, self).__init__(**kwargs)
class MockContactToolbarActionItem(DropdownItem):
def __init__(self, object, **kwargs):
kwargs["icon"] = "fa fa-hand-peace-o"
kwargs["text"] = _("Hello %(name)s") % {"name": object.full_name}
kwargs["url"] = "/#mocktoolbaractionitem"
super(MockContactToolbarActionItem, self).__init__(**kwargs)
@staticmethod
def visible_for_object(object):
return True
class MockProductToolbarActionItem(DropdownItem):
def __init__(self, object, **kwargs):
kwargs["icon"] = "fa fa-female"
kwargs["text"] = _("This is %(sku)s") % {"sku": object.sku}
kwargs["url"] = "#%(sku)s" % {"sku": object.sku}
super(MockProductToolbarActionItem, self).__init__(**kwargs)
| agpl-3.0 | -2,588,089,296,794,377,700 | 32.977778 | 73 | 0.646174 | false |
hep-cce/hpc-edge-service | balsam/BalsamJobReceiver.py | 1 | 2368 | import logging,sys,os
logger = logging.getLogger(__name__)
from common import MessageReceiver,db_tools
from balsam import models
from django.conf import settings
from django.db import utils,connections,DEFAULT_DB_ALIAS
class BalsamJobReceiver(MessageReceiver.MessageReceiver):
''' subscribes to the input user job queue and adds jobs to the database '''
def __init__(self):
MessageReceiver.MessageReceiver.__init__(self,
settings.BALSAM_SITE,
settings.BALSAM_SITE,
settings.RABBITMQ_SERVER_NAME,
settings.RABBITMQ_SERVER_PORT,
settings.RABBITMQ_BALSAM_EXCHANGE_NAME,
settings.RABBITMQ_SSL_CERT,
settings.RABBITMQ_SSL_KEY,
settings.RABBITMQ_SSL_CA_CERTS
)
# This is where the real processing of incoming messages happens
def consume_msg(self,channel,method_frame,header_frame,body):
logger.debug('in consume_msg' )
if body is not None:
logger.debug(' received message: ' + body )
try:
job = models.BalsamJob()
job.deserialize(body)
except Exception,e:
            logger.exception('error deserializing incoming job. body = ' + body + ' not continuing with this job.')
channel.basic_ack(method_frame.delivery_tag)
return
# should be some failure notice to argo
# create unique DB connection string
try:
db_connection_id = db_tools.get_db_connection_id(os.getpid())
db_backend = utils.load_backend(connections.databases[DEFAULT_DB_ALIAS]['ENGINE'])
db_conn = db_backend.DatabaseWrapper(connections.databases[DEFAULT_DB_ALIAS], db_connection_id)
connections[db_connection_id] = db_conn
except Exception,e:
logger.exception(' received exception while creating DB connection, exception message: ')
# acknoledge message
channel.basic_ack(method_frame.delivery_tag)
return
job.save()
models.send_status_message(job)
else:
logger.error(' consume_msg called, but body is None ')
# should be some failure notice to argo
# acknowledge receipt of message
channel.basic_ack(method_frame.delivery_tag)
# delete DB connection
del connections[db_connection_id]
| bsd-3-clause | -2,875,591,182,772,225,000 | 36.587302 | 115 | 0.644426 | false |
SoBeRBot94/TicTacToe-GE | GameEngine/Player.py | 1 | 1883 | from GameEngine.AI import _AI
from GameEngine.User import _User
class Player(object):
"""
The class which controls if it's a human player or an AI.
    Can take three parameters as input, where the name and the type of the player (AI or user) are mandatory.
    The third parameter, difficulty, is not needed when initializing a user type of player,
    but it is mandatory when initializing an AI. If trying to initialize an AI without the difficulty
    parameter, a ValueError is raised from the _AI class. If trying to initialize the Player class
    without specifying that the player is an 'ai' or a 'user', a ValueError is raised notifying the client of this.
The only public facing method which is used by the client is nextMove.
:param name: The name of the player
:param typePlayer: The type of the player, "AI" or "user"
:param difficulty: The difficulty level of the AI, "easy", "medium" or "hard". The default is hard
"""
def __init__(self, name, typePlayer, difficulty='hard'):
self.name = name
self.typePlayer = typePlayer
self.difficulty = difficulty
if typePlayer.lower() == 'ai':
self._Player = _AI(difficulty)
elif typePlayer.lower() == 'user':
self._Player = _User()
else:
raise ValueError('The stated player is not an AI nor a user. ' \
'Please make sure one of those have been stated.')
def nextMove(self, board, currentPlayer):
"""
Runs the method nextMove for the class which was initialized.
:param board: The 3x3 board from GameEngine
:param currentPlayer: The player who is making the next move (X or O)
:returns: tuple with the row and column for the next move. On the form of (rowIdx, colIdx)
"""
return self._Player.nextMove(board, currentPlayer)
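# --- Illustrative usage sketch (not from the original module) ---
# Hypothetical example; `board` stands for the 3x3 board object provided by
# the GameEngine and 'X' is the mark of the player to move.
#
#     ai_player = Player("Bot", "ai", difficulty="easy")
#     human_player = Player("Alice", "user")
#     row, col = ai_player.nextMove(board, "X")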
| gpl-3.0 | -5,555,820,729,064,458,000 | 44.926829 | 114 | 0.663303 | false |
SCSSoftware/BlenderTools | addon/io_scs_tools/internals/persistent/initialization.py | 1 | 6852 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2019: SCS Software
import bpy
import os
from bpy.app.handlers import persistent
from io_scs_tools.internals import preview_models as _preview_models
from io_scs_tools.internals.callbacks import open_gl as _open_gl_callback
from io_scs_tools.internals.callbacks import lighting_east_lock as _lighting_east_lock_callback
from io_scs_tools.internals.containers import config as _config_container
from io_scs_tools.internals.connections.wrappers import collection as _connections_wrapper
from io_scs_tools.utils import get_scs_globals as _get_scs_globals
from io_scs_tools.utils import info as _info_utils
from io_scs_tools.utils.printout import lprint
@persistent
def post_load(scene):
"""Initialize blender tools from handlers hooks.
NOTE: Should be used only as load_post handler hook.
:param scene: Blender scene
:type scene: bpy.types.Scene
"""
init_scs_tools()
def on_enable():
"""Initialize blender tools on addon enabling.
NOTE: Should be used only as timer function on addon enabling.
"""
# run until properly initialized
if init_scs_tools():
return None
return 1.0
def init_scs_tools():
"""Parts and Variants data initialisation (persistent).
Things which this function does:
1. copies all the settings to current world
2. checks object identities
3. updates shaders presets path and reloads them
Cases when it should be run:
1. Blender startup -> SCS tools needs to configured
2. Opening .blend file -> because all the configs needs to be moved to current world
3. addon reloading and enable/disable -> for SCS tools this is the same as opening Blender
"""
# SCREEN CHECK...
if not hasattr(bpy.data, "worlds"):
lprint("I Initialization abort, context incorrect ...")
return False
lprint("I Initialization of SCS scene, BT version: " + _info_utils.get_tools_version())
# NOTE: covers: start-up, reload, enable/disable and it should be immediately removed
# from handlers as soon as it's executed for the first time
# if initialise_scs_dict in bpy.app.handlers.scene_update_post:
# bpy.app.handlers.scene_update_post.remove(initialise_scs_dict)
# INITIALIZE CUSTOM CONNECTIONS DRAWING SYSTEM
_connections_wrapper.init()
    # TODO: this should not be needed anymore, as config locks shouldn't be saved in the blend file anymore; see: scs_globals.get_writtable_keys
# release lock as user might saved blender file during engaged lock.
# If that happens config lock property gets saved to blend file and if user opens that file again,
# lock will be still engaged and no settings could be applied without releasing lock here.
_config_container.release_config_lock()
# USE SETTINGS FROM CONFIG...
# NOTE: Reapplying the settings from config file to the currently opened Blender file datablock.
# The thing is, that every Blend file holds its own copy of SCS Global Settings from the machine on which it got saved.
# The SCS Global Settings needs to be overwritten upon each file load to reflect the settings from local config file,
# but also upon every SCS Project Base Path change.
_config_container.apply_settings(preload_from_blend=True)
# GLOBAL PATH CHECK...
if _get_scs_globals().scs_project_path != "":
if not os.path.isdir(_get_scs_globals().scs_project_path):
lprint("\nW The Project Path %r is NOT VALID!\n\tPLEASE SELECT A VALID PATH TO THE PROJECT BASE FOLDER.\n",
(_get_scs_globals().scs_project_path,))
# CREATE PREVIEW MODEL LIBRARY
_preview_models.init()
# ADD DRAW HANDLERS
_open_gl_callback.enable(mode=_get_scs_globals().drawing_mode)
# ENABLE LIGHTING EAST LOCK HANDLER
# Blender doesn't call update on properties when file is opened,
# so in case lighting east was locked in saved blend file, we have to manually enable callback for it
# On the other hand if user previously had east locked and now loaded the file without it,
# again we have to manually disable callback.
if _get_scs_globals().lighting_east_lock:
_lighting_east_lock_callback.enable()
else:
_lighting_east_lock_callback.disable()
# as last notify user if his Blender version is outdated
if not _info_utils.is_blender_able_to_run_tools():
message = "Your Blender version %s is outdated, all SCS Blender Tools functionalities were internally disabled.\n\t " \
"Please update Blender before continue, minimal required version for SCS Blender Tools is: %s!"
message = message % (_info_utils.get_blender_version()[0], _info_utils.get_required_blender_version())
# first report error with blender tools printing system
lprint("E " + message)
# then disable add-on as it's not usable in the case Blender is out-dated
bpy.ops.preferences.addon_disable(module="io_scs_tools")
# and as last show warning message in the form of popup menu for user to see info about outdated Blender
# As we don't have access to our 3D view report operator anymore,
# we have to register our SCS_TOOLS_OT_ShowMessageInPopup class back and invoke it.
from io_scs_tools.operators.wm import SCS_TOOLS_OT_ShowMessageInPopup
bpy.utils.register_class(SCS_TOOLS_OT_ShowMessageInPopup)
        bpy.ops.wm.scs_tools_show_message_in_popup('INVOKE_DEFAULT',
is_modal=True,
title="SCS Blender Tools Initialization Problem",
message="\n\n" + message.replace("\t ", "") + "\n\n", # formatting for better visibility
width=580, # this is minimal width to properly fit in given message
height=bpy.context.window.height if bpy.context and bpy.context.window else 200)
return True
| gpl-2.0 | -7,920,692,968,261,022,000 | 45.297297 | 148 | 0.691039 | false |
64studio/smart | smart/fetcher.py | 1 | 67875 | #
# Copyright (c) 2005 Canonical
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.util.strtools import sizeToStr, speedToStr, secondsToStr
from smart.media import MediaSet, DeviceMedia
from smart.uncompress import Uncompressor
from smart.mirror import MirrorSystem
from smart.const import *
from smart import *
import tempfile
import socket
import urllib
import string
import thread
import time
import os
import re
import signal
import threading
MAXRETRIES = 30
SPEEDDELAY = 1
CANCELDELAY = 2
MAXACTIVEDOWNLOADS = 10
SOCKETTIMEOUT = 600
class FetcherCancelled(Error): pass
class Fetcher(object):
_registry = {}
_localschemes = []
def __init__(self):
self._uncompressor = Uncompressor()
self._mediaset = MediaSet()
self._uncompressing = 0
self._localdir = tempfile.gettempdir()
self._mirrorsystem = MirrorSystem()
self._mangle = False
self._caching = OPTIONAL
self._items = {}
self._handlers = {}
self._forcecopy = False
self._forcemountedcopy = False
self._localpathprefix = None
self._cancel = False
self._speedupdated = 0
self._activedownloads = 0
self._activedownloadslock = thread.allocate_lock()
self._maxactivedownloads = 0
self.time = 0
self._eta = 0
def reset(self):
self._items.clear()
self._uncompressing = 0
def cancel(self):
self._cancel = True
def getItem(self, url):
return self._items.get(url)
def getItems(self):
return self._items.values()
def getSucceededSet(self):
set = {}
for item in self._items.values():
if item.getStatus() == SUCCEEDED:
set[item.getOriginalURL()] = item.getTargetPath()
return set
def getFailedSet(self):
set = {}
for item in self._items.values():
if item.getStatus() == FAILED:
set[item.getOriginalURL()] = item.getFailedReason()
return set
def getUncompressor(self):
return self._uncompressor
def getMediaSet(self):
return self._mediaset
def getMirrorSystem(self):
return self._mirrorsystem
def getCaching(self):
return self._caching
def setCaching(self, value):
self._caching = value
def setLocalDir(self, localdir, mangle=False):
self._localdir = localdir
self._mangle = mangle
def getLocalDir(self):
return self._localdir
def setLocalPathPrefix(self, prefix):
self._localpathprefix = prefix
def getLocalPathPrefix(self):
return self._localpathprefix
def getLocalPath(self, item):
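        """Map an item's original URL to a file path inside the local dir.

        With mangling enabled the whole URL becomes the file name ('/'
        replaced by '_'); otherwise only the base name of the URL path is
        used, optionally prefixed with the configured local path prefix.
        """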
assert isinstance(item, FetchItem)
url = item.getOriginalURL()
if self._mangle:
filename = url.replace("/", "_")
else:
scheme, selector = urllib.splittype(url)
host, path = urllib.splithost(selector)
path, query = urllib.splitquery(path)
path = urllib.unquote(path)
filename = os.path.basename(path)
if self._localpathprefix:
filename = self._localpathprefix+filename
return os.path.join(self._localdir, filename)
def setForceCopy(self, value):
self._forcecopy = value
def getForceCopy(self):
return self._forcecopy
def setForceMountedCopy(self, value):
self._forcemountedcopy = value
def getForceMountedCopy(self):
return self._forcemountedcopy
def changeActiveDownloads(self, value):
result = False
self._activedownloadslock.acquire()
if self._activedownloads+value <= self._maxactivedownloads:
self._activedownloads += value
result = True
self._activedownloadslock.release()
return result
def getActiveDownloads(self):
return self._activedownloads
def enqueue(self, url, **info):
if url in self._items:
raise Error, _("%s is already in the queue") % url
mirror = self._mirrorsystem.get(url)
item = FetchItem(self, url, mirror)
self._items[url] = item
if info:
item.setInfo(**info)
handler = self.getHandlerInstance(item)
handler.enqueue(item)
return item
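    # --- Illustrative usage sketch (not from the original code) ---
    # Hypothetical call chain, assuming a configured smart environment; the
    # URL and size below are placeholders only:
    #
    #     fetcher = Fetcher()
    #     fetcher.setLocalDir("/var/tmp/smart-fetch", mangle=True)
    #     item = fetcher.enqueue("http://mirror.example/info/repomd.xml.gz",
    #                            uncomp=True, size=2048)
    #     fetcher.run(what="repository metadata")
    #     if item.getStatus() == SUCCEEDED:
    #         localpath = item.getTargetPath()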
def runLocal(self):
for handler in self._handlers.values():
handler.runLocal()
def run(self, what=None, progress=None):
socket.setdefaulttimeout(sysconf.get("socket-timeout", SOCKETTIMEOUT))
self._cancel = False
thread_name = threading.currentThread().getName()
if thread_name == "MainThread":
def quitIntHandler(signal, frame):
print '\nInterrupted\n'
sys.exit(0)
old_quit_handler = signal.signal(signal.SIGQUIT, quitIntHandler)
old_int_handler = signal.signal(signal.SIGINT, quitIntHandler)
self._activedownloads = 0
self._maxactivedownloads = sysconf.get("max-active-downloads",
MAXACTIVEDOWNLOADS)
self._maxdownloadrate = sysconf.get("max-download-rate", 0)
self.time = time.time()
handlers = self._handlers.values()
total = len(self._items)
self.runLocal()
local = len([x for x in self._items.values()
if x.getStatus() == SUCCEEDED])
if local == total or self._caching is ALWAYS:
if progress:
progress.add(total)
return
if progress:
prog = progress
prog.add(local)
if what:
prog.setTopic(_("Fetching %s...") % what)
prog.show()
else:
prog = iface.getProgress(self, True)
prog.start()
prog.set(local, total)
if what:
topic = _("Fetching %s...") % what
else:
topic = _("Fetching information...")
prog.setTopic(topic)
prog.show()
for handler in handlers:
handler.start()
active = handlers[:]
uncomp = self._uncompressor
uncompchecked = {}
self._speedupdated = self.time
cancelledtime = None
while active or self._uncompressing:
self.time = time.time()
if self._cancel:
if not cancelledtime:
cancelledtime = self.time
for handler in active[:]:
if not handler.wasCancelled():
handler.cancel()
if not handler.tick():
active.remove(handler)
# We won't wait for handlers which are not being nice.
if time.time() > cancelledtime+CANCELDELAY:
for item in self._items.values():
if item.getStatus() != SUCCEEDED:
item.setCancelled()
# Remove handlers, since we don't know their state.
self._handlers.clear()
prog.show()
break
prog.show()
continue
for handler in active[:]:
if not handler.tick():
active.remove(handler)
if self._speedupdated+SPEEDDELAY < self.time:
self._speedupdated = self.time
updatespeed = True
else:
updatespeed = False
for url in self._items:
item = self._items[url]
if item.getStatus() == FAILED:
if (item.getRetries() < MAXRETRIES and
item.setNextURL()):
item.reset()
handler = self.getHandlerInstance(item)
handler.enqueue(item)
if handler not in active:
active.append(handler)
continue
elif (item.getStatus() != SUCCEEDED or
not item.getInfo("uncomp")):
if updatespeed:
item.updateSpeed()
item.updateETA()
continue
localpath = item.getTargetPath()
if localpath in uncompchecked:
continue
uncompchecked[localpath] = True
uncomphandler = uncomp.getHandler(localpath)
if not uncomphandler:
continue
uncomppath = uncomphandler.getTargetPath(localpath)
if (not self.hasStrongValidate(item, uncomp=True) or
not self.validate(item, uncomppath, uncomp=True)):
self._uncompressing += 1
thread.start_new_thread(self._uncompress,
(item, localpath, uncomphandler))
else:
item.setSucceeded(uncomppath)
prog.show()
time.sleep(0.1)
for handler in handlers:
handler.stop()
if not progress:
prog.stop()
if thread_name == "MainThread":
signal.signal(signal.SIGQUIT, old_quit_handler)
signal.signal(signal.SIGINT, old_int_handler)
if self._cancel:
raise FetcherCancelled, _("Cancelled")
def _uncompress(self, item, localpath, uncomphandler):
try:
uncomphandler.uncompress(localpath)
except Error, e:
item.setFailed(unicode(e))
else:
uncomppath = uncomphandler.getTargetPath(localpath)
valid, reason = self.validate(item, uncomppath,
withreason=True, uncomp=True)
if not valid:
item.setFailed(reason)
else:
item.setSucceeded(uncomppath)
self._uncompressing -= 1
def getLocalSchemes(self):
return self._localschemes
getLocalSchemes = classmethod(getLocalSchemes)
def setHandler(self, scheme, klass, local=False):
self._registry[scheme] = klass
if local:
self._localschemes.append(scheme)
setHandler = classmethod(setHandler)
def getHandler(self, scheme, klass):
return self._registry.get(scheme)
getHandler = classmethod(getHandler)
def getHandlerInstance(self, item):
scheme = item.getURL().scheme
proxy = sysconf.get("%s-proxy" % scheme)
if proxy:
os.environ["%s_proxy" % scheme] = proxy
handler = self._handlers.get(scheme)
if not handler:
klass = self._registry.get(scheme)
if not klass:
raise Error, _("Unsupported scheme: %s") % scheme
handler = klass(self)
self._handlers[scheme] = handler
return handler
def hasStrongValidate(self, item, uncomp=False):
if uncomp:
prefix = "uncomp_"
else:
prefix = ""
return bool(item.getInfo(prefix+"md5") or
item.getInfo(prefix+"sha") or
item.getInfo(prefix+"sha256"))
def validate(self, item, localpath, withreason=False, uncomp=False):
try:
if not os.path.isfile(localpath):
raise Error, _("File not found for validation")
if uncomp:
uncompprefix = "uncomp_"
else:
uncompprefix = ""
validate = item.getInfo(uncompprefix+"validate")
if validate:
valid, reason = validate(item.getOriginalURL(),
localpath, withreason=True)
if valid is not None:
if withreason:
return valid, reason
else:
return valid
size = item.getInfo(uncompprefix+"size")
if size:
lsize = os.path.getsize(localpath)
if lsize != size:
raise Error, _("Unexpected size (expected %d, got %d)") % \
(size, lsize)
filemd5 = item.getInfo(uncompprefix+"md5")
if filemd5:
try:
from hashlib import md5
except ImportError:
from md5 import md5
digest = md5()
file = open(localpath)
data = file.read(BLOCKSIZE)
while data:
digest.update(data)
data = file.read(BLOCKSIZE)
lfilemd5 = digest.hexdigest()
if lfilemd5 != filemd5:
raise Error, _("Invalid MD5 (expected %s, got %s)") % \
(filemd5, lfilemd5)
filesha256 = item.getInfo(uncompprefix+"sha256")
if filesha256:
try:
from hashlib import sha256
except ImportError:
from smart.util.sha256 import sha256
digest = sha256()
file = open(localpath)
data = file.read(BLOCKSIZE)
while data:
digest.update(data)
data = file.read(BLOCKSIZE)
lfilesha256 = digest.hexdigest()
if lfilesha256 != filesha256:
raise Error, _("Invalid SHA256 (expected %s, got %s)") % \
(filesha256, lfilesha256)
else:
filesha = item.getInfo(uncompprefix+"sha")
if filesha:
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
digest = sha()
file = open(localpath)
data = file.read(BLOCKSIZE)
while data:
digest.update(data)
data = file.read(BLOCKSIZE)
lfilesha = digest.hexdigest()
if lfilesha != filesha:
raise Error, _("Invalid SHA (expected %s, got %s)") % \
(filesha, lfilesha)
except Error, reason:
if withreason:
return False, reason
return False
else:
if withreason:
return True, None
return True
class FetchItem(object):
def __init__(self, fetcher, url, mirror):
self._fetcher = fetcher
self._url = url
self._mirror = mirror
self._urlobj = URL(mirror.getNext())
self._retries = 0
self._starttime = None
self._current = 0
self._total = 0
self._speed = 0
self._eta = 0
self._speedtime = 0
self._speedcurrent = 0
self._info = {}
self._status = WAITING
self._failedreason = None
self._targetpath = None
self._progress = iface.getSubProgress(fetcher)
def reset(self):
self._status = WAITING
self._failedreason = None
self._targetpath = None
self._starttime = None
self._current = 0
self._total = 0
self._speed = 0
self._speedtime = 0
self._speedcurrent = 0
url = self._urlobj.original
if self._progress.getSub(url):
self._progress.setSubStopped(url)
self._progress.show()
self._progress.resetSub(url)
def getRetries(self):
return self._retries
def setNextURL(self):
self._retries += 1
url = self._mirror.getNext()
if url:
self._urlobj.set(url)
return True
else:
self._urlobj.set(self._url)
return False
def getOriginalURL(self):
return self._url
def getURL(self):
return self._urlobj
def setURL(self, url):
self._urlobj.set(url)
def getStatus(self):
return self._status
def getFailedReason(self):
return self._failedreason
def getTargetPath(self):
return self._targetpath
def getInfo(self, kind, default=None):
return self._info.get(kind, default)
def setInfo(self, **info):
# Known used info kinds:
#
# - validate: validate function, it must accept a 'withreason'
# keyword, and must return either 'valid, reason'
# or just 'valid', depending on 'withreason'. 'valid'
# may be None, True, or False. If it's True or False,
# no other information will be checked.
# - md5, sha, sha256: file digest
# - size: file size
# - uncomp: whether to uncompress or not
# - uncomp_{md5,sha,sha256,size}: uncompressed equivalents
#
for kind in ("md5", "sha", "sha256",
"uncomp_md5", "uncomp_sha", "uncomp_sha256"):
value = info.get(kind)
if value:
info[kind] = value.lower()
self._info.update(info)
def start(self):
if self._status is WAITING:
self._status = RUNNING
self._starttime = self._fetcher.time
prog = self._progress
url = self._urlobj.original
prog.setSubTopic(url, url)
prog.setSubTopic(url, re.sub("([a-z]+:/+[^:/]+:)[^/]+(@.*)",
r"\1*\2", url))
prog.setSub(url, 0, self._info.get("size") or 1, 1)
prog.show()
def progress(self, current, total):
if self._status is RUNNING:
self._current = current
self._total = total
if total:
subdata = {}
subdata["current"] = sizeToStr(current)
subdata["total"] = sizeToStr(total)
subdata["speed"] = speedToStr(self._speed)
subdata["eta"] = secondsToStr(self._eta)
self._progress.setSub(self._urlobj.original, current, total, 1,
subdata)
self._progress.show()
def updateSpeed(self):
if self._status is RUNNING:
now = self._fetcher.time
if not self._current or not self._speedtime:
self._speedcurrent = self._current
self._speedtime = now
elif self._speedtime+1 < now:
speed = self._speed
currentdelta = self._current-self._speedcurrent
timedelta = now-self._speedtime
speed = currentdelta/timedelta
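                # Exponential moving average (weight 0.25 on the newest
                # sample) keeps the reported speed from jumping around
                # between progress updates.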
self._speed = self._speed+(speed-self._speed)*0.25
self._speedtime = now
self._speedcurrent = self._current
self.progress(self._current, self._total)
def updateETA(self):
if self._status is RUNNING:
if (self._speed > 1) and (self._total > 0):
self._eta = (self._total - self._current) / self._speed
else:
self._eta = None
def setSucceeded(self, targetpath, fetchedsize=0):
if self._status is not FAILED:
self._status = SUCCEEDED
self._targetpath = targetpath
if self._starttime:
if fetchedsize:
now = self._fetcher.time
timedelta = now-self._starttime
if timedelta < 1:
timedelta = 1
self._mirror.addInfo(time=timedelta, size=fetchedsize)
self._speed = fetchedsize/timedelta
self._progress.setSubDone(self._urlobj.original)
self._progress.show()
def setFailed(self, reason):
self._status = FAILED
self._failedreason = reason
if self._starttime:
self._mirror.addInfo(failed=1)
self._progress.setSubStopped(self._urlobj.original)
self._progress.show()
def setCancelled(self):
self.setFailed(_("Cancelled"))
class URL(object):
def __init__(self, url=None):
if url:
self.set(url)
else:
self.reset()
def reset(self):
self.original = ""
self.scheme = ""
self.user = ""
self.passwd = ""
self.host = ""
self.port = None
self.path = ""
self.query = ""
def set(self, url):
if url.startswith("/"):
self.scheme = "file"
rest = url
else:
if ":/" not in url:
raise Error, _("Invalid URL: %s") % url
self.scheme, rest = urllib.splittype(url)
if self.scheme in Fetcher.getLocalSchemes():
scheme = self.scheme
self.reset()
self.scheme = scheme
self.original = url
self.path = os.path.normpath(rest)
if self.path.startswith("//"):
self.path = self.path[1:]
return
self.original = url
host, rest = urllib.splithost(rest)
user, host = urllib.splituser(host)
if user:
self.user, self.passwd = urllib.splitpasswd(user)
else:
self.user = ""
self.passwd = ""
self.host, self.port = urllib.splitport(host)
if self.host.startswith("[") and self.host.endswith("]"):
self.host = self.host[1:-1]
self.path, self.query = urllib.splitquery(rest)
self.user = self.user and urllib.unquote(self.user) or ""
self.passwd = self.passwd and urllib.unquote(self.passwd) or ""
self.path = urllib.unquote(self.path)
def __str__(self):
if self.scheme in Fetcher.getLocalSchemes():
return "%s://%s" % (self.scheme, urllib.quote(self.path))
url = self.scheme+"://"
if self.user:
url += urllib.quote(self.user)
if self.passwd:
url += ":"
url += urllib.quote(self.passwd)
url += "@"
url += self.host
if self.port:
url += ":%s" % self.port
if self.path:
url += urllib.quote(self.path)
else:
url += "/"
if self.query:
url += "?"
url += self.query
return url
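# Illustrative example of how URL parsing behaves (the address and
# credentials below are made up):
#
#   u = URL("ftp://user:secret@ftp.example.com:2121/pub/file.tar.gz?x=1")
#   u.scheme -> "ftp"              u.user -> "user"    u.passwd -> "secret"
#   u.host   -> "ftp.example.com"  u.port -> "2121"
#   u.path   -> "/pub/file.tar.gz" u.query -> "x=1"
#   str(u) rebuilds an equivalent, re-quoted URL string.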
class FetcherHandler(object):
def __init__(self, fetcher):
self._fetcher = fetcher
self._queue = []
self._cancel = False
def getQueue(self):
return self._queue
def wasCancelled(self):
return self._cancel
def enqueue(self, item):
self._queue.append(item)
def dequeue(self, item):
self._queue.remove(item)
def start(self):
# Fetcher is starting.
self._queue.sort()
self._cancel = False
def stop(self):
# Fetcher is stopping.
pass
def cancel(self):
# Downloads are being cancelled.
self._cancel = True
queue = self._queue[:]
del self._queue[:]
for item in queue:
item.setCancelled()
def changeActiveDownloads(self, value):
return self._fetcher.changeActiveDownloads(value)
def tick(self):
# Ticking does periodic maintenance of the tasks running
# inside the handler. It should return true while there
# is still something to be done, and should not lock for
# very long. Threads should be started for that purpose.
return False
def getLocalPath(self, item):
return self._fetcher.getLocalPath(item)
def runLocal(self, caching=None):
        # Cache handling: items whose local copy already validates are marked
        # succeeded and dequeued here, so only missing or stale files reach
        # the network handlers.

fetcher = self._fetcher
if not caching:
caching = fetcher.getCaching()
if caching is not NEVER:
uncompressor = fetcher.getUncompressor()
for i in range(len(self._queue)-1,-1,-1):
item = self._queue[i]
localpath = self.getLocalPath(item)
uncomphandler = uncompressor.getHandler(localpath)
if uncomphandler and item.getInfo("uncomp"):
uncomppath = uncomphandler.getTargetPath(localpath)
valid, reason = fetcher.validate(item, uncomppath,
withreason=True,
uncomp=True)
if not valid and fetcher.validate(item, localpath):
uncomphandler.uncompress(localpath)
valid, reason = fetcher.validate(item, uncomppath,
withreason=True,
uncomp=True)
elif valid and not fetcher.hasStrongValidate(item, True):
valid, reason = fetcher.validate(item, localpath,
withreason=True)
localpath = uncomppath
else:
valid, reason = fetcher.validate(item, localpath,
withreason=True)
if valid:
del self._queue[i]
item.setSucceeded(localpath)
elif caching is ALWAYS:
del self._queue[i]
item.setFailed(reason)
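# Minimal sketch of a custom handler built on the interface above.  The
# scheme name and behaviour are hypothetical, not part of the original code:
#
#   class NullHandler(FetcherHandler):
#       def tick(self):
#           # A real handler would start worker threads here and return
#           # True while work remains; this one just fails everything.
#           while self._queue:
#               self._queue.pop().setFailed("null scheme is not fetchable")
#           return False
#
#   Fetcher.setHandler("null", NullHandler)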
class FileHandler(FetcherHandler):
RETRIES = 3
def __init__(self, *args):
FetcherHandler.__init__(self, *args)
self._active = False
self._mediaset = self._fetcher.getMediaSet()
self._forcecopy = {}
def stop(self):
FetcherHandler.stop(self)
self._forcecopy.clear()
def processMedias(self):
self._forcecopy.clear()
for item in self._queue:
localpath = item.getURL().path
localpath, media = self._mediaset.processFilePath(localpath)
if media:
if (not media.wasMounted() and
self._fetcher.getForceMountedCopy()):
self._forcecopy[item] = True
if isinstance(media, DeviceMedia):
# We don't want item.getURL().original changed, so that
# progress still shows the original path.
item.getURL().path = localpath
def getLocalPath(self, item):
if item in self._forcecopy or self._fetcher.getForceCopy():
return FetcherHandler.getLocalPath(self, item)
else:
return item.getURL().path
def runLocal(self):
self.processMedias()
if self._fetcher.getForceCopy():
FetcherHandler.runLocal(self)
else:
# First, handle compressed files without uncompressed
# versions available.
fetcher = self._fetcher
caching = fetcher.getCaching()
uncompressor = fetcher.getUncompressor()
for i in range(len(self._queue)-1,-1,-1):
item = self._queue[i]
if item in self._forcecopy:
if caching is not ALWAYS:
del self._queue[i]
continue
elif not item.getInfo("uncomp"):
continue
localpath = self.getLocalPath(item)
uncomphandler = uncompressor.getHandler(localpath)
if uncomphandler:
uncomppath = uncomphandler.getTargetPath(localpath)
if not fetcher.validate(item, uncomppath, uncomp=True):
valid, reason = fetcher.validate(item, localpath,
withreason=True)
if valid:
linkpath = self._fetcher.getLocalPath(item)
if os.path.isfile(linkpath):
os.unlink(linkpath)
os.symlink(localpath, linkpath)
uncomppath = uncomphandler.getTargetPath(linkpath)
uncomphandler.uncompress(linkpath)
valid, reason = fetcher.validate(item, uncomppath,
withreason=True,
uncomp=True)
os.unlink(linkpath)
if valid:
item.setSucceeded(uncomppath)
else:
item.setFailed(reason)
del self._queue[i]
# Then, everything else, but the items selected in self._forcecopy
FetcherHandler.runLocal(self, caching=ALWAYS)
if caching is not ALWAYS:
self._queue.extend(self._forcecopy.keys())
def tick(self):
if self._queue and not self._active:
self._active = True
thread.start_new_thread(self.copy, ())
return self._active
def copy(self):
while self._queue:
item = self._queue.pop(0)
item.start()
retries = 0
filepath = item.getURL().path
localpath = self.getLocalPath(item)
assert filepath != localpath
while retries < self.RETRIES:
try:
input = open(filepath)
output = open(localpath, "w")
while True:
data = input.read(BLOCKSIZE)
if not data:
break
output.write(data)
except (IOError, OSError), e:
error = unicode(e)
retries += 1
else:
item.setSucceeded(localpath)
break
else:
item.setFailed(error)
self._active = False
Fetcher.setHandler("file", FileHandler, local=True)
class LocalMediaHandler(FileHandler):
def runLocal(self):
if not self._fetcher.getForceCopy():
# When not copying, convert earlier to get local files
# from the media.
self.convertToFile()
FileHandler.runLocal(self)
def start(self):
self.convertToFile()
FileHandler.start(self)
def convertToFile(self):
mediaset = self._fetcher.getMediaSet()
for i in range(len(self._queue)-1,-1,-1):
item = self._queue[i]
url = item.getURL()
if url.scheme == "localmedia":
itempath = url.path
media = item.getInfo("media")
if not media:
media = mediaset.getDefault()
if media:
media.mount()
else:
mediaset.mountAll()
media = mediaset.findFile(itempath)
if not media or not media.isMounted():
item.setFailed(_("Media not found"))
del self._queue[i]
continue
item.setURL(media.joinURL(itempath))
Fetcher.setHandler("localmedia", LocalMediaHandler, local=True)
class FTPHandler(FetcherHandler):
MAXACTIVE = 5
MAXINACTIVE = 5
MAXPERHOST = 2
TIMEOUT = 60
def __init__(self, *args):
FetcherHandler.__init__(self, *args)
self._active = {} # ftp -> host
self._inactive = {} # ftp -> (user, host, port)
self._lock = thread.allocate_lock()
self._activelimit = {} # host -> num
def tick(self):
import ftplib
self._lock.acquire()
if self._queue:
if len(self._active) < self.MAXACTIVE:
for i in range(len(self._queue)-1,-1,-1):
item = self._queue[i]
url = item.getURL()
hostactive = [x for x in self._active
if self._active[x] == url.host]
maxactive = self._activelimit.get(url.host,
self.MAXPERHOST)
if (len(hostactive) < maxactive and
self.changeActiveDownloads(+1)):
del self._queue[i]
userhost = (url.user, url.host, url.port)
for ftp in self._inactive:
if self._inactive[ftp] == userhost:
del self._inactive[ftp]
self._active[ftp] = url.host
thread.start_new_thread(self.fetch, (ftp, item))
break
else:
if len(self._inactive) > self.MAXINACTIVE:
del self._inactive[ftp]
ftp = ftplib.FTP()
ftp.lasttime = self._fetcher.time
self._active[ftp] = url.host
thread.start_new_thread(self.connect,
(ftp, item, len(hostactive)))
self._lock.release()
return bool(self._queue or self._active)
def connect(self, ftp, item, active):
item.start()
url = item.getURL()
import ftplib
try:
ftp.connect(url.host, url.port)
ftp.login(url.user, url.passwd)
except (socket.error, ftplib.Error, EOFError), e:
if (isinstance(e, ftplib.error_perm) or
isinstance(e, ftplib.error_temp)) and active:
item.reset()
self._lock.acquire()
self._queue.append(item)
self._lock.release()
self._activelimit[item.getURL().host] = active
else:
try:
errmsg = unicode(e[1])
except IndexError:
errmsg = unicode(e)
item.setFailed(errmsg)
self._lock.acquire()
del self._active[ftp]
self._lock.release()
self.changeActiveDownloads(-1)
else:
self.fetch(ftp, item)
def fetch(self, ftp, item):
import ftplib
fetcher = self._fetcher
url = item.getURL()
if self._cancel:
item.setCancelled()
self.changeActiveDownloads(-1)
return
item.start()
try:
try:
ftp.cwd(os.path.dirname(url.path))
except ftplib.Error:
if ftp.lasttime+self.TIMEOUT < fetcher.time:
raise EOFError
raise
filename = os.path.basename(url.path)
localpath = self.getLocalPath(item)
mtime = None
total = None
# Check if the file exists at all.
ftp.nlst(filename)
try:
resp = ftp.sendcmd("MDTM "+filename)
if resp[:3] == "213":
mtimes = resp[3:].strip()
mtime = time.mktime(time.strptime(mtimes, "%Y%m%d%H%M%S"))
except (ftplib.Error, ValueError, AttributeError):
pass
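            # A successful MDTM reply is expected to look like
            # "213 20240101120000" (status 213 followed by a timestamp in
            # %Y%m%d%H%M%S form); any other reply simply leaves mtime unset.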
try:
total = ftp.size(filename)
except ftplib.Error:
pass
else:
size = item.getInfo("size")
if size and size != total:
raise Error, _("Server reports unexpected size")
if (not mtime or not os.path.isfile(localpath) or
mtime != os.path.getmtime(localpath) or
not fetcher.validate(item, localpath)):
localpathpart = localpath+".part"
if (os.path.isfile(localpathpart) and
(not total or os.path.getsize(localpathpart) < total)):
rest = os.path.getsize(localpathpart)
openmode = "a"
item.current = rest
else:
rest = None
openmode = "w"
item.current = 0
try:
local = open(localpathpart, openmode)
except (IOError, OSError), e:
raise Error, "%s: %s" % (localpathpart, e)
def write(data):
if self._cancel:
raise FetcherCancelled
local.write(data)
item.current += len(data)
item.progress(item.current, total)
try:
try:
ftp.retrbinary("RETR "+filename, write, BLOCKSIZE,
rest)
except ftplib.error_perm:
iface.debug("Server does not support resume. \
Restarting...")
finally:
local.close()
if mtime:
os.utime(localpathpart, (mtime, mtime))
os.rename(localpathpart, localpath)
valid, reason = fetcher.validate(item, localpath,
withreason=True)
if not valid:
if openmode == "a":
# Try again, from the very start.
item.reset()
self._lock.acquire()
self._queue.append(item)
self._lock.release()
else:
raise Error, reason
else:
if total:
fetchedsize = total-(rest or 0)
elif not rest:
fetchedsize = os.path.getsize(localpath)
else:
fetchedsize = None
item.setSucceeded(localpath, fetchedsize)
else:
item.setSucceeded(localpath)
except (socket.error, EOFError):
# Put it back on the queue, and kill this ftp object.
self._lock.acquire()
self._queue.append(item)
del self._active[ftp]
self._lock.release()
self.changeActiveDownloads(-1)
return
except (Error, IOError, OSError, ftplib.Error), e:
item.setFailed(unicode(e))
except FetcherCancelled:
item.setCancelled()
self._lock.acquire()
ftp.lasttime = fetcher.time
self._inactive[ftp] = (url.user, url.host, url.port)
del self._active[ftp]
self._lock.release()
self.changeActiveDownloads(-1)
Fetcher.setHandler("ftp", FTPHandler)
class URLLIBHandler(FetcherHandler):
MAXACTIVE = 5
def __init__(self, *args):
FetcherHandler.__init__(self, *args)
self._active = 0
self._lock = thread.allocate_lock()
def tick(self):
self._lock.acquire()
if self._queue:
while (self._active < self.MAXACTIVE and
self.changeActiveDownloads(+1)):
self._active += 1
thread.start_new_thread(self.fetch, ())
self._lock.release()
return bool(self._queue or self._active)
def fetch(self):
import urllib, rfc822, calendar
from time import time, sleep
class Opener(urllib.FancyURLopener):
user = None
passwd = None
def __init__(self, *args, **kwargs):
self.retrycount = 3
urllib.FancyURLopener.__init__(self, *args, **kwargs)
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
self.retrycount -= 1
if self.retrycount <= 0:
return self.http_error_default(url, fp, errcode, errmsg, headers)
return urllib.FancyURLopener.http_error_401(self, url, fp, errcode, errmsg, headers, data)
def get_user_passwd(self, host, realm, clear_cache = 0):
return self.user, self.passwd
def http_error_default(self, url, fp, errcode, errmsg, headers):
if not fp:
fp = open("/dev/null")
info = urllib.addinfourl(fp, headers, "http:" + url)
info.errcode = errcode
info.errmsg = errmsg
return info
opener = Opener()
fetcher = self._fetcher
while not self._cancel:
self._lock.acquire()
if not self._queue:
self._lock.release()
break
item = self._queue.pop()
self._lock.release()
url = item.getURL()
opener.user = url.user
opener.passwd = url.passwd
item.start()
try:
localpath = self.getLocalPath(item)
current = 0
total = None
size = item.getInfo("size")
del opener.addheaders[:]
opener.addheader("User-Agent", "smart/" + VERSION)
if (os.path.isfile(localpath) and
fetcher.validate(item, localpath)):
mtime = os.path.getmtime(localpath)
opener.addheader("if-modified-since",
rfc822.formatdate(mtime))
localpathpart = localpath+".part"
if os.path.isfile(localpathpart):
partsize = os.path.getsize(localpathpart)
if not size or partsize < size:
opener.addheader("range", "bytes=%d-" % partsize)
else:
partsize = 0
remote = opener.open(url.original)
if hasattr(remote, "errcode") and remote.errcode == 416:
# Range not satisfiable, try again without it.
opener.addheaders = [x for x in opener.addheaders
if x[0] != "range"]
remote = opener.open(url.original)
if hasattr(remote, "errcode") and remote.errcode != 206:
# 206 = Partial Content
raise remote
info = remote.info()
if "content-length" in info:
total = int(info["content-length"])
elif size:
total = size
if "content-range" in info:
openmode = "a"
current = partsize
if "content-length" in info:
total += partsize
else:
partsize = 0
openmode = "w"
if size and total and size != total:
raise Error, _("Server reports unexpected size")
try:
local = open(localpathpart, openmode)
except (IOError, OSError), e:
raise IOError, "%s: %s" % (localpathpart, e)
rate_limit = self._fetcher._maxdownloadrate
if rate_limit:
rate_limit /= self._active
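                # The configured limit is split evenly between the currently
                # active downloads.  Rough worked example of the throttling
                # below (illustrative numbers): with rate_limit = 100000 B/s,
                # after fetching current = 300000 bytes in elapsed_time = 2 s
                # the expected_time is 300000 / 100000 = 3 s, so the thread
                # sleeps 3 - 2 = 1 s to stay at or under the limit.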
start = time()
try:
data = remote.read(BLOCKSIZE)
while data:
if self._cancel:
raise FetcherCancelled
local.write(data)
current += len(data)
item.progress(current, total)
if rate_limit:
elapsed_time = time() - start
if elapsed_time != 0:
rate = current / elapsed_time
expected_time = current / rate_limit
sleep_time = expected_time - elapsed_time
if sleep_time > 0:
sleep(sleep_time)
data = remote.read(BLOCKSIZE)
finally:
local.close()
remote.close()
os.rename(localpathpart, localpath)
valid, reason = fetcher.validate(item, localpath,
withreason=True)
if not valid:
if openmode == "a":
# Try again, from the very start.
item.reset()
self._lock.acquire()
self._queue.append(item)
self._lock.release()
else:
raise Error, reason
else:
if total:
fetchedsize = total-partsize
elif not partsize:
fetchedsize = os.path.getsize(localpath)
else:
fetchedsize = None
item.setSucceeded(localpath, fetchedsize)
if "last-modified" in info:
mtimes = info["last-modified"]
mtimet = rfc822.parsedate(mtimes)
if mtimet:
mtime = calendar.timegm(mtimet)
os.utime(localpath, (mtime, mtime))
except urllib.addinfourl, remote:
if remote.errcode == 304: # Not modified
item.setSucceeded(localpath)
elif remote.errcode == 404:
# Use a standard translatable error message.
item.setFailed(_("File not found"))
else:
item.setFailed(remote.errmsg)
except (IOError, OSError, Error, socket.error), e:
try:
errmsg = unicode(e[1])
except IndexError:
errmsg = unicode(e)
item.setFailed(errmsg)
except FetcherCancelled:
item.setCancelled()
self._lock.acquire()
self._active -= 1
self._lock.release()
self.changeActiveDownloads(-1)
#Fetcher.setHandler("ftp", URLLIBHandler)
Fetcher.setHandler("http", URLLIBHandler)
Fetcher.setHandler("https", URLLIBHandler)
Fetcher.setHandler("gopher", URLLIBHandler)
# This is not in use, since urllib2 is not thread safe, and
# the authentication scheme requires additional steps which
# are still not implemented. Also, we need some way to handle
# 206 returns without breaking out.
"""
class URLLIB2Handler(FetcherHandler):
MAXACTIVE = 1
USECACHEDFTP = True
_openerinstalled = False
def __init__(self, *args):
FetcherHandler.__init__(self, *args)
if not URLLIB2Handler._openerinstalled:
from smart.util import urllib2
URLLIB2Handler._openerinstalled = True
handlerlist = []
if self.USECACHEDFTP:
handlerlist.append(urllib2.CacheFTPHandler)
handlerlist.append(urllib2.GopherHandler)
opener = urllib2.build_opener(urllib2.CacheFTPHandler)
urllib2.install_opener(opener)
self._active = 0
self._lock = thread.allocate_lock()
def tick(self):
self._lock.acquire()
if self._queue:
while (self._active < self.MAXACTIVE and
self.changeActiveDownloads(+1)):
self._active += 1
thread.start_new_thread(self.fetch, ())
self._lock.release()
return bool(self._queue or self._active)
def fetch(self):
import urllib2, rfc822
fetcher = self._fetcher
while True:
self._lock.acquire()
if not self._queue:
self._lock.release()
break
item = self._queue.pop()
self._lock.release()
item.start()
url = item.getURL()
try:
localpath = self.getLocalPath(item)
current = 0
total = None
size = item.getInfo("size")
request = urllib2.Request(url.original)
if (os.path.isfile(localpath) and
fetcher.validate(item, localpath)):
mtime = os.path.getmtime(localpath)
request.add_header("if-modified-since",
rfc822.formatdate(mtime))
localpathpart = localpath+".part"
if os.path.isfile(localpathpart):
partsize = os.path.getsize(localpathpart)
if not size or partsize < size:
request.add_header("range", "bytes=%d-" % partsize)
else:
partsize = 0
try:
remote = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 416: # Range not satisfiable
del request.headers["Range"]
remote = urllib2.urlopen(request)
else:
raise
info = remote.info()
if "content-length" in info:
total = int(info["content-length"])
elif size:
total = size
if "content-range" in info:
openmode = "a"
current = partsize
total += partsize
else:
openmode = "w"
if size and total and size != total:
raise Error, _("Server reports unexpected size")
try:
local = open(localpathpart, openmode)
except (IOError, OSError), e:
raise IOError, "%s: %s" % (localpathpart, e)
try:
data = remote.read(BLOCKSIZE)
while data:
local.write(data)
current += len(data)
item.progress(current, total)
data = remote.read(BLOCKSIZE)
finally:
local.close()
remote.close()
os.rename(localpathpart, localpath)
valid, reason = fetcher.validate(url, localpath,
withreason=True)
if not valid:
if openmode == "a":
# Try again, from the very start.
item.reset()
self._lock.acquire()
self._queue.append(item)
self._lock.release()
else:
raise Error, reason
else:
if total:
fetchedsize = total-partsize
elif not partsize:
fetchedsize = os.path.getsize(localpath)
else:
fetchedsize = None
item.setSucceeded(localpath, fetchedsize)
if "last-modified" in info:
mtimes = info["last-modified"]
mtimet = rfc822.parsedate(mtimes)
if mtimet:
mtime = time.mktime(mtimet)
os.utime(localpath, (mtime, mtime))
except urllib2.HTTPError, e:
if e.code == 304: # Not modified
item.setSucceeded(localpath)
else:
item.setFailed(unicode(e))
except (IOError, OSError, Error), e:
item.setFailed(unicode(e))
self._lock.acquire()
self._active -= 1
self._lock.release()
self.changeActiveDownloads(-1)
#Fetcher.setHandler("ftp", URLLIB2Handler)
Fetcher.setHandler("http", URLLIB2Handler)
Fetcher.setHandler("https", URLLIB2Handler)
Fetcher.setHandler("gopher", URLLIB2Handler)
"""#"""
class PyCurlHandler(FetcherHandler):
MAXACTIVE = 5
MAXINACTIVE = 5
MAXPERHOST = 2
def __init__(self, *args):
import pycurl
FetcherHandler.__init__(self, *args)
self._active = {} # handle -> (scheme, host)
self._inactive = {} # handle -> (user, host, port)
self._activelimit = {} # host -> num
self._running = False
self._multi = pycurl.CurlMulti()
self._lock = thread.allocate_lock()
def tick(self):
import pycurl
if not self._running and (self._queue or self._active):
self._running = True
thread.start_new_thread(self.perform, ())
fetcher = self._fetcher
multi = self._multi
if self._cancel:
self._lock.acquire()
for handle in self._active:
item = handle.item
item.setCancelled()
url = item.getURL()
multi.remove_handle(handle)
userhost = (url.user, url.host, url.port)
self._active.clear()
self._lock.release()
num = 1
while num != 0:
self._lock.acquire()
num, succeeded, failed = multi.info_read()
self._lock.release()
self.changeActiveDownloads(-len(succeeded)-len(failed))
for handle in succeeded:
item = handle.item
local = handle.local
localpath = handle.localpath
url = item.getURL()
local.close()
self._lock.acquire()
multi.remove_handle(handle)
self._lock.release()
http_code = handle.getinfo(pycurl.HTTP_CODE)
if (http_code == 404 or
handle.getinfo(pycurl.SIZE_DOWNLOAD) == 0):
# Not modified or not found
os.unlink(localpath+".part")
else:
if os.path.isfile(localpath):
os.unlink(localpath)
os.rename(localpath+".part", localpath)
mtime = handle.getinfo(pycurl.INFO_FILETIME)
if mtime != -1:
os.utime(localpath, (mtime, mtime))
del self._active[handle]
userhost = (url.user, url.host, url.port)
self._inactive[handle] = userhost
if http_code == 404:
item.setFailed(_("File not found"))
else:
valid, reason = fetcher.validate(item, localpath,
withreason=True)
if valid:
fetchedsize = handle.getinfo(pycurl.SIZE_DOWNLOAD)
item.setSucceeded(localpath, fetchedsize)
elif handle.partsize:
self._queue.append(item)
else:
item.setFailed(reason)
for handle, errno, errmsg in failed:
item = handle.item
local = handle.local
localpath = handle.localpath
url = item.getURL()
local.close()
self._lock.acquire()
multi.remove_handle(handle)
self._lock.release()
http_code = handle.getinfo(pycurl.HTTP_CODE)
del self._active[handle]
userhost = (url.user, url.host, url.port)
self._inactive[handle] = userhost
if handle.partsize and "byte ranges" in errmsg:
os.unlink(localpath+".part")
item.reset()
self._queue.append(item)
elif handle.active and "password" in errmsg:
item.reset()
self._queue.append(item)
self._activelimit[item.getURL().host] = handle.active
del self._inactive[handle]
elif http_code == 404:
# Use a standard translatable error message.
item.setFailed(_("File not found"))
else:
item.setFailed(errmsg)
if self._queue:
if len(self._active) < self.MAXACTIVE:
for i in range(len(self._queue)-1,-1,-1):
item = self._queue[i]
url = item.getURL()
schemehost = (url.scheme, url.host)
hostactive = [x for x in self._active
if self._active[x] == schemehost]
maxactive = self._activelimit.get(url.host,
self.MAXPERHOST)
if (len(hostactive) < maxactive and
self.changeActiveDownloads(+1)):
del self._queue[i]
userhost = (url.user, url.host, url.port)
for handle in self._inactive:
if self._inactive[handle] == userhost:
del self._inactive[handle]
break
else:
if len(self._inactive) > self.MAXINACTIVE:
del self._inactive[handle]
handle = pycurl.Curl()
localpath = self.getLocalPath(item)
localpathpart = localpath+".part"
size = item.getInfo("size")
if os.path.isfile(localpathpart):
partsize = os.path.getsize(localpathpart)
if size and partsize >= size:
partsize = 0
else:
partsize = 0
handle.partsize = partsize
if partsize:
openmode = "a"
handle.setopt(pycurl.RESUME_FROM_LARGE,
long(partsize))
else:
openmode = "w"
handle.setopt(pycurl.RESUME_FROM_LARGE, 0L)
try:
local = open(localpathpart, openmode)
except (IOError, OSError), e:
item.setFailed("%s: %s" % (localpathpart, e))
del self._active[handle]
self.changeActiveDownloads(-1)
continue
handle.item = item
handle.local = local
handle.localpath = localpath
handle.active = len(hostactive)
item.start()
def progress(downtotal, downcurrent,
uptotal, upcurrent, item=item,
size=size, partsize=partsize):
if not downtotal:
if size and downcurrent:
item.progress(partsize+downcurrent, size)
else:
item.progress(partsize+downcurrent,
partsize+downtotal)
handle.setopt(pycurl.URL, str(url))
handle.setopt(pycurl.OPT_FILETIME, 1)
handle.setopt(pycurl.LOW_SPEED_LIMIT, 1)
handle.setopt(pycurl.LOW_SPEED_TIME, SOCKETTIMEOUT)
handle.setopt(pycurl.NOPROGRESS, 0)
handle.setopt(pycurl.PROGRESSFUNCTION, progress)
handle.setopt(pycurl.WRITEDATA, local)
handle.setopt(pycurl.FOLLOWLOCATION, 1)
handle.setopt(pycurl.MAXREDIRS, 5)
handle.setopt(pycurl.HTTPHEADER, ["Pragma:"])
handle.setopt(pycurl.USERAGENT, "smart/" + VERSION)
handle.setopt(pycurl.FAILONERROR, 1)
# check if we have a valid local file and use I-M-S
if fetcher.validate(item, localpath):
handle.setopt(pycurl.TIMECONDITION,
pycurl.TIMECONDITION_IFMODSINCE)
mtime = os.path.getmtime(localpath)
if url.scheme == "ftp":
mtime += 1 # libcurl handles ftp mtime wrongly
handle.setopt(pycurl.TIMEVALUE, int(mtime))
else:
# reset the I-M-S option
handle.setopt(pycurl.TIMECONDITION,
pycurl.TIMECONDITION_NONE)
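                        # With TIMECONDITION_IFMODSINCE set, an up-to-date
                        # remote file results in an empty transfer
                        # (SIZE_DOWNLOAD == 0), which the tick() loop above
                        # treats as "not modified" and keeps the cached copy.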
rate_limit = self._fetcher._maxdownloadrate
if rate_limit:
rate_limit /= self._active
handle.setopt(pycurl.MAX_RECV_SPEED_LARGE, rate_limit)
self._active[handle] = schemehost
self._lock.acquire()
multi.add_handle(handle)
self._lock.release()
return bool(self._queue or self._active)
def perform(self):
import pycurl
multi = self._multi
mp = pycurl.E_CALL_MULTI_PERFORM
while self._queue or self._active:
self._lock.acquire()
res = mp
while res == mp:
res, num = multi.perform()
self._lock.release()
multi.select(1.0)
# Keep in mind that even though the while above has exited due to
# self._active being False, it may actually be true *here* due to
# race conditions.
self._running = False
try:
import pycurl
except ImportError:
pycurl = None
def enablePycurl():
if pycurl:
schemes = pycurl.version_info()[8]
for scheme in schemes:
if scheme != "file":
Fetcher.setHandler(scheme, PyCurlHandler)
hooks.register("enable-pycurl", enablePycurl)
class SCPHandler(FetcherHandler):
MAXACTIVE = 5
MAXPERHOST = 2
def __init__(self, *args):
FetcherHandler.__init__(self, *args)
self._active = [] # item
self._lock = thread.allocate_lock()
def tick(self):
import ftplib
self._lock.acquire()
if self._queue:
if len(self._active) < self.MAXACTIVE:
for i in range(len(self._queue)-1,-1,-1):
item = self._queue[i]
url = item.getURL()
hostactive = [x for x in self._active
if x.getURL().host == url.host]
if (len(hostactive) < self.MAXPERHOST and
self.changeActiveDownloads(+1)):
del self._queue[i]
self._active.append(item)
item.total = None
item.localpath = None
thread.start_new_thread(self.fetch, (item,))
prog = iface.getSubProgress(self._fetcher)
for item in self._active:
if item.total and item.localpath:
try:
size = os.path.getsize(item.localpath)
except OSError:
pass
else:
item.progress(size, item.total)
self._lock.release()
return bool(self._queue or self._active)
def fetch(self, item):
from smart.util.ssh import SSH
fetcher = self._fetcher
prog = iface.getSubProgress(self._fetcher)
item.start()
url = item.getURL()
if not url.user:
import pwd
url.user = pwd.getpwuid(os.getuid()).pw_name
if url.host[-1] == ":":
url.host = url.host[:-1]
locurl = URL()
locurl.scheme = url.scheme
locurl.user = url.user
locurl.host = url.host
locurl.port = url.port
def getpassword(location=str(locurl)):
return iface.askPassword(location)
del locurl
ssh = SSH(url.user, url.host, url.passwd, getpassword)
try:
localpath = self.getLocalPath(item)
mtime = None
total = None
size = item.getInfo("size")
status, output = ssh.ssh("stat -c '%%Y %%s' %s" % url.path)
if status == 0:
tokens = output.split()
try:
mtime = int(tokens[0])
total = int(tokens[1])
except ValueError:
if size:
total = size
else:
if size and size != total:
raise Error, _("Server reports unexpected size")
elif size:
total = size
item.total = total
fetchedsize = 0
if (not mtime or not os.path.isfile(localpath) or
mtime != os.path.getmtime(localpath) or
not fetcher.validate(item, localpath)):
item.localpath = localpath+".part"
status, output = ssh.rscp(url.path, item.localpath)
if status != 0:
raise Error, output
os.rename(item.localpath, localpath)
fetchedsize = os.path.getsize(localpath)
if mtime:
os.utime(localpath, (mtime, mtime))
valid, reason = fetcher.validate(item, localpath,
withreason=True)
if not valid:
raise Error, reason
except (Error, IOError, OSError), e:
item.setFailed(unicode(e))
else:
item.setSucceeded(localpath, fetchedsize)
self._lock.acquire()
self._active.remove(item)
self._lock.release()
self.changeActiveDownloads(-1)
Fetcher.setHandler("scp", SCPHandler)
# vim:ts=4:sw=4:et
| gpl-2.0 | 3,637,493,377,482,120,700 | 34.629921 | 106 | 0.483727 | false |
GaretJax/docker-deployer | deployer/routing/models.py | 1 | 1776 | import socket
import struct
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from structlog import get_logger
from deployer.applications.models import Instance
from deployer.utils import get_container_ip
logger = get_logger()
Base = declarative_base()
class Route(Base):
__tablename__ = 'route'
id = Column(Integer(), primary_key=True)
routing_key = Column(String(255), nullable=False)
weight = Column(Integer(), nullable=False, default=1)
instance_id = Column(Integer(), ForeignKey(Instance.id), nullable=False)
instance = relationship(
Instance,
backref=backref('routes', lazy='dynamic')
)
def update(self, frontend_name):
client = self.instance.host.get_client()
instance_ip = self.instance.get_ip()
router_ip = get_container_ip(client, frontend_name)
payload = {
'key': self.routing_key,
'address': '{}:{}'.format(instance_ip, 5510),
}
msg = ''
for k, v in payload.iteritems():
k, v = str(k), str(v)
msg += struct.pack('<h', len(k)) + str(k)
msg += struct.pack('<h', len(v)) + v
remove = self.instance.stopped is not None
msg = struct.pack('<BhB', 224, len(msg), 1 if remove else 0) + msg
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg, (router_ip, 5500))
logger.log(
'route.discard' if remove else 'route.update',
routing_key=self.routing_key,
instance=instance_ip,
host=self.instance.host.name,
router=router_ip,
weight=self.weight
)
| mit | 3,688,632,540,628,936,000 | 30.157895 | 76 | 0.614865 | false |
jphaupt/understanding-busy-cops | tests/Test_queueingDefs_sat_prob.py | 1 | 1145 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 09:58:16 2016
@author: Internet
"""
import unittest
import sys, os.path
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if not path in sys.path:
sys.path.insert(1, path)
del path
import queueingDefs as Q
print()
class Test_queuingDefs_sat_prob(unittest.TestCase):
def test_231(self):
print("Testing sat_prob(2, 3, 1)")
self.assertAlmostEqual(Q.sat_prob(2, 3, 1), 2/3.0)
def test_23_1decimal(self):
print("Testing sat_prob(2, 3, 1.0)")
self.assertAlmostEqual(Q.sat_prob(2, 3, 1.0), 2/3.0)
    def test_23_1decimal2(self):
        print("Testing sat_prob(2, 3, 1.2)")
self.assertAlmostEqual(Q.sat_prob(2, 3, 1.2), (2/3.0)*(0.8)+(2/12.0)*(0.2))
def test_saturated(self):
print("Testing sat_prob(3, 2, 1)")
self.assertRaises(Q.SaturationError, Q.sat_prob, 3, 2, 1)
def test_negative_units(self):
print("Testing sat_prob(3, 2, 1)")
self.assertRaises(ValueError, Q.sat_prob, 3, 2, -2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,177,568,567,995,629,000 | 24.444444 | 83 | 0.581659 | false |
mvaled/sentry | src/sentry/api/helpers/group_index.py | 1 | 35584 | from __future__ import absolute_import
import logging
import six
from collections import defaultdict
from datetime import timedelta
from uuid import uuid4
from django.db import IntegrityError, transaction
from django.utils import timezone
from rest_framework import serializers
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from sentry import eventstream, features
from sentry.api.base import audit_logger
from sentry.api.fields import Actor, ActorField
from sentry.api.serializers import serialize
from sentry.api.serializers.models.actor import ActorSerializer
from sentry.api.serializers.models.group import SUBSCRIPTION_REASON_MAP
from sentry.constants import DEFAULT_SORT_OPTION
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity,
Commit,
Group,
GroupAssignee,
GroupHash,
GroupLink,
GroupStatus,
GroupTombstone,
GroupResolution,
GroupBookmark,
GroupSeen,
GroupShare,
GroupSnooze,
GroupSubscription,
GroupSubscriptionReason,
Release,
Repository,
TOMBSTONE_FIELDS_FROM_GROUP,
Team,
User,
UserOption,
)
from sentry.models.group import looks_like_short_id
from sentry.api.issue_search import convert_query_values, InvalidSearchQuery, parse_search_query
from sentry.signals import (
issue_deleted,
issue_ignored,
issue_resolved,
advanced_search_feature_gated,
)
from sentry.tasks.deletion import delete_groups as delete_groups_task
from sentry.tasks.integrations import kick_off_status_syncs
from sentry.tasks.merge import merge_groups
from sentry.utils import metrics
from sentry.utils.audit import create_audit_entry
from sentry.utils.cursors import Cursor
from sentry.utils.functional import extract_lazy_object
delete_logger = logging.getLogger("sentry.deletions.api")
class ValidationError(Exception):
pass
def build_query_params_from_request(request, organization, projects, environments):
query_kwargs = {"projects": projects, "sort_by": request.GET.get("sort", DEFAULT_SORT_OPTION)}
limit = request.GET.get("limit")
if limit:
try:
query_kwargs["limit"] = int(limit)
except ValueError:
raise ValidationError("invalid limit")
# TODO: proper pagination support
if request.GET.get("cursor"):
try:
query_kwargs["cursor"] = Cursor.from_string(request.GET.get("cursor"))
except ValueError:
raise ParseError(detail="Invalid cursor parameter.")
query = request.GET.get("query", "is:unresolved").strip()
if query:
try:
search_filters = convert_query_values(
parse_search_query(query), projects, request.user, environments
)
except InvalidSearchQuery as e:
raise ValidationError(u"Your search query could not be parsed: {}".format(e.message))
validate_search_filter_permissions(organization, search_filters, request.user)
query_kwargs["search_filters"] = search_filters
return query_kwargs
# List of conditions that mark a SearchFilter as an advanced search. Format is
# (lambda SearchFilter(): <boolean condition>, '<feature_name')
advanced_search_features = [
(lambda search_filter: search_filter.is_negation, "negative search"),
(lambda search_filter: search_filter.value.is_wildcard(), "wildcard search"),
]
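# Hypothetical illustration of the format above (not a real Sentry feature):
# gating a "regex search" capability would just mean appending another
# (predicate, label) pair, e.g.
#
#   advanced_search_features.append(
#       (lambda search_filter: getattr(search_filter, "is_regex", False),
#        "regex search")
#   )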
def validate_search_filter_permissions(organization, search_filters, user):
"""
Verifies that an organization is allowed to perform the query that they
submitted.
If the org is using a feature they don't have access to, raises
    `ValidationError` with information about which part of the query they
    don't have access to.
    :param search_filters: iterable of parsed search filters to check
"""
# If the organization has advanced search, then no need to perform any
# other checks since they're allowed to use all search features
if features.has("organizations:advanced-search", organization):
return
for search_filter in search_filters:
for feature_condition, feature_name in advanced_search_features:
if feature_condition(search_filter):
advanced_search_feature_gated.send_robust(
user=user, organization=organization, sender=validate_search_filter_permissions
)
raise ValidationError(
u"You need access to the advanced search feature to use {}".format(feature_name)
)
def get_by_short_id(organization_id, is_short_id_lookup, query):
if is_short_id_lookup == "1" and looks_like_short_id(query):
try:
return Group.objects.by_qualified_short_id(organization_id, query)
except Group.DoesNotExist:
pass
STATUS_CHOICES = {
"resolved": GroupStatus.RESOLVED,
"unresolved": GroupStatus.UNRESOLVED,
"ignored": GroupStatus.IGNORED,
"resolvedInNextRelease": GroupStatus.UNRESOLVED,
# TODO(dcramer): remove in 9.0
"muted": GroupStatus.IGNORED,
}
class InCommitValidator(serializers.Serializer):
commit = serializers.CharField(required=True)
repository = serializers.CharField(required=True)
def validate_repository(self, value):
project = self.context["project"]
try:
value = Repository.objects.get(organization_id=project.organization_id, name=value)
except Repository.DoesNotExist:
raise serializers.ValidationError("Unable to find the given repository.")
return value
def validate(self, attrs):
attrs = super(InCommitValidator, self).validate(attrs)
repository = attrs.get("repository")
commit = attrs.get("commit")
if not repository:
raise serializers.ValidationError(
{"repository": ["Unable to find the given repository."]}
)
if not commit:
raise serializers.ValidationError({"commit": ["Unable to find the given commit."]})
try:
commit = Commit.objects.get(repository_id=repository.id, key=commit)
except Commit.DoesNotExist:
raise serializers.ValidationError({"commit": ["Unable to find the given commit."]})
return commit
class StatusDetailsValidator(serializers.Serializer):
inNextRelease = serializers.BooleanField()
inRelease = serializers.CharField()
inCommit = InCommitValidator(required=False)
ignoreDuration = serializers.IntegerField()
ignoreCount = serializers.IntegerField()
# in minutes, max of one week
ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)
ignoreUserCount = serializers.IntegerField()
# in minutes, max of one week
ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)
def validate_inRelease(self, value):
project = self.context["project"]
if value == "latest":
try:
value = (
Release.objects.filter(
projects=project, organization_id=project.organization_id
)
.extra(select={"sort": "COALESCE(date_released, date_added)"})
.order_by("-sort")[0]
)
except IndexError:
raise serializers.ValidationError(
"No release data present in the system to form a basis for 'Next Release'"
)
else:
try:
value = Release.objects.get(
projects=project, organization_id=project.organization_id, version=value
)
except Release.DoesNotExist:
raise serializers.ValidationError(
"Unable to find a release with the given version."
)
return value
def validate_inNextRelease(self, value):
project = self.context["project"]
try:
value = (
Release.objects.filter(projects=project, organization_id=project.organization_id)
.extra(select={"sort": "COALESCE(date_released, date_added)"})
.order_by("-sort")[0]
)
except IndexError:
raise serializers.ValidationError(
"No release data present in the system to form a basis for 'Next Release'"
)
return value
class GroupValidator(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(STATUS_CHOICES.keys(), STATUS_CHOICES.keys()))
statusDetails = StatusDetailsValidator()
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
isSubscribed = serializers.BooleanField()
merge = serializers.BooleanField()
discard = serializers.BooleanField()
ignoreDuration = serializers.IntegerField()
ignoreCount = serializers.IntegerField()
# in minutes, max of one week
ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)
ignoreUserCount = serializers.IntegerField()
# in minutes, max of one week
ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)
assignedTo = ActorField()
# TODO(dcramer): remove in 9.0
# for the moment, the CLI sends this for any issue update, so allow nulls
snoozeDuration = serializers.IntegerField(allow_null=True)
def validate_assignedTo(self, value):
if (
value
and value.type is User
and not self.context["project"].member_set.filter(user_id=value.id).exists()
):
raise serializers.ValidationError("Cannot assign to non-team member")
if (
value
and value.type is Team
and not self.context["project"].teams.filter(id=value.id).exists()
):
raise serializers.ValidationError(
"Cannot assign to a team without access to the project"
)
return value
def validate(self, attrs):
attrs = super(GroupValidator, self).validate(attrs)
if len(attrs) > 1 and "discard" in attrs:
raise serializers.ValidationError("Other attributes cannot be updated when discarding")
return attrs
def handle_discard(request, group_list, projects, user):
for project in projects:
if not features.has("projects:discard-groups", project, actor=user):
return Response({"detail": ["You do not have that feature enabled"]}, status=400)
# grouped by project_id
groups_to_delete = defaultdict(list)
for group in group_list:
with transaction.atomic():
try:
tombstone = GroupTombstone.objects.create(
previous_group_id=group.id,
actor_id=user.id if user else None,
**{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP}
)
except IntegrityError:
# in this case, a tombstone has already been created
# for a group, so no hash updates are necessary
pass
else:
groups_to_delete[group.project_id].append(group)
GroupHash.objects.filter(group=group).update(
group=None, group_tombstone_id=tombstone.id
)
for project in projects:
_delete_groups(request, project, groups_to_delete.get(project.id), delete_type="discard")
return Response(status=204)
def _delete_groups(request, project, group_list, delete_type):
if not group_list:
return
# deterministic sort for sanity, and for very large deletions we'll
# delete the "smaller" groups first
group_list.sort(key=lambda g: (g.times_seen, g.id))
group_ids = [g.id for g in group_list]
Group.objects.filter(id__in=group_ids).exclude(
status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]
).update(status=GroupStatus.PENDING_DELETION)
eventstream_state = eventstream.start_delete_groups(project.id, group_ids)
transaction_id = uuid4().hex
GroupHash.objects.filter(project_id=project.id, group__id__in=group_ids).delete()
delete_groups_task.apply_async(
kwargs={
"object_ids": group_ids,
"transaction_id": transaction_id,
"eventstream_state": eventstream_state,
},
countdown=3600,
)
for group in group_list:
create_audit_entry(
request=request,
transaction_id=transaction_id,
logger=audit_logger,
organization_id=project.organization_id,
target_object=group.id,
)
delete_logger.info(
"object.delete.queued",
extra={
"object_id": group.id,
"transaction_id": transaction_id,
"model": type(group).__name__,
},
)
issue_deleted.send_robust(
group=group, user=request.user, delete_type=delete_type, sender=_delete_groups
)
def delete_groups(request, projects, organization_id, search_fn):
"""
`search_fn` refers to the `search.query` method with the appropriate
project, org, environment, and search params already bound
"""
group_ids = request.GET.getlist("id")
if group_ids:
group_list = list(
Group.objects.filter(
project__in=projects,
project__organization_id=organization_id,
id__in=set(group_ids),
).exclude(status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS])
)
else:
try:
# bulk mutations are limited to 1000 items
            # TODO(dcramer): it'd be nice to support more than this, but it's
# a bit too complicated right now
cursor_result, _ = search_fn({"limit": 1000, "paginator_options": {"max_limit": 1000}})
except ValidationError as exc:
return Response({"detail": six.text_type(exc)}, status=400)
group_list = list(cursor_result)
if not group_list:
return Response(status=204)
groups_by_project_id = defaultdict(list)
for group in group_list:
groups_by_project_id[group.project_id].append(group)
for project in projects:
_delete_groups(request, project, groups_by_project_id.get(project.id), delete_type="delete")
return Response(status=204)
def self_subscribe_and_assign_issue(acting_user, group):
# Used during issue resolution to assign to acting user
# returns None if the user didn't elect to self assign on resolution
# or the group is assigned already, otherwise returns Actor
# representation of current user
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user, group=group, reason=GroupSubscriptionReason.status_change
)
self_assign_issue = UserOption.objects.get_value(
user=acting_user, key="self_assign_issue", default="0"
)
if self_assign_issue == "1" and not group.assignee_set.exists():
return Actor(type=User, id=acting_user.id)
def update_groups(request, projects, organization_id, search_fn):
group_ids = request.GET.getlist("id")
if group_ids:
group_list = Group.objects.filter(
project__organization_id=organization_id, project__in=projects, id__in=group_ids
)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
# TODO(jess): We may want to look into refactoring GroupValidator
# to support multiple projects, but this is pretty complicated
# because of the assignee validation. Punting on this for now.
for project in projects:
serializer = GroupValidator(data=request.data, partial=True, context={"project": project})
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.validated_data)
# so we won't have to requery for each group
project_lookup = {p.id: p for p in projects}
acting_user = request.user if request.user.is_authenticated() else None
if not group_ids:
try:
# bulk mutations are limited to 1000 items
            # TODO(dcramer): it'd be nice to support more than this, but it's
# a bit too complicated right now
cursor_result, _ = search_fn({"limit": 1000, "paginator_options": {"max_limit": 1000}})
except ValidationError as exc:
return Response({"detail": six.text_type(exc)}, status=400)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
is_bulk = len(group_ids) > 1
group_project_ids = {g.project_id for g in group_list}
# filter projects down to only those that have groups in the search results
projects = [p for p in projects if p.id in group_project_ids]
queryset = Group.objects.filter(id__in=group_ids)
discard = result.get("discard")
if discard:
return handle_discard(request, list(queryset), projects, acting_user)
statusDetails = result.pop("statusDetails", result)
status = result.get("status")
release = None
commit = None
if status in ("resolved", "resolvedInNextRelease"):
if status == "resolvedInNextRelease" or statusDetails.get("inNextRelease"):
# TODO(jess): We may want to support this for multi project, but punting on it for now
if len(projects) > 1:
return Response(
{"detail": "Cannot set resolved in next release for multiple projects."},
status=400,
)
release = (
statusDetails.get("inNextRelease")
or Release.objects.filter(
projects=projects[0], organization_id=projects[0].organization_id
)
.extra(select={"sort": "COALESCE(date_released, date_added)"})
.order_by("-sort")[0]
)
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
"version": ""
}
status_details = {
"inNextRelease": True,
"actor": serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_next_release
res_type_str = "in_next_release"
res_status = GroupResolution.Status.pending
elif statusDetails.get("inRelease"):
# TODO(jess): We could update validation to check if release
# applies to multiple projects, but I think we agreed to punt
# on this for now
if len(projects) > 1:
return Response(
{"detail": "Cannot set resolved in release for multiple projects."}, status=400
)
release = statusDetails["inRelease"]
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
"version": release.version
}
status_details = {
"inRelease": release.version,
"actor": serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_release
res_type_str = "in_release"
res_status = GroupResolution.Status.resolved
elif statusDetails.get("inCommit"):
# TODO(jess): Same here, this is probably something we could do, but
# punting for now.
if len(projects) > 1:
return Response(
{"detail": "Cannot set resolved in commit for multiple projects."}, status=400
)
commit = statusDetails["inCommit"]
activity_type = Activity.SET_RESOLVED_IN_COMMIT
activity_data = {"commit": commit.id}
status_details = {
"inCommit": serialize(commit, request.user),
"actor": serialize(extract_lazy_object(request.user), request.user),
}
res_type_str = "in_commit"
else:
res_type_str = "now"
activity_type = Activity.SET_RESOLVED
activity_data = {}
status_details = {}
now = timezone.now()
metrics.incr("group.resolved", instance=res_type_str, skip_internal=True)
        # if we've specified a commit, let's see if it's already been released
# this will allow us to associate the resolution to a release as if we
# were simply using 'inRelease' above
# Note: this is different than the way commit resolution works on deploy
# creation, as a given deploy is connected to an explicit release, and
# in this case we're simply choosing the most recent release which contains
# the commit.
if commit and not release:
# TODO(jess): If we support multiple projects for release / commit resolution,
# we need to update this to find the release for each project (we shouldn't assume
# it's the same)
try:
release = (
Release.objects.filter(projects__in=projects, releasecommit__commit=commit)
.extra(select={"sort": "COALESCE(date_released, date_added)"})
.order_by("-sort")[0]
)
res_type = GroupResolution.Type.in_release
res_status = GroupResolution.Status.resolved
except IndexError:
release = None
for group in group_list:
with transaction.atomic():
resolution = None
if release:
resolution_params = {
"release": release,
"type": res_type,
"status": res_status,
"actor_id": request.user.id if request.user.is_authenticated() else None,
}
resolution, created = GroupResolution.objects.get_or_create(
group=group, defaults=resolution_params
)
if not created:
resolution.update(datetime=timezone.now(), **resolution_params)
if commit:
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
affected = Group.objects.filter(id=group.id).update(
status=GroupStatus.RESOLVED, resolved_at=now
)
if not resolution:
created = affected
group.status = GroupStatus.RESOLVED
group.resolved_at = now
assigned_to = self_subscribe_and_assign_issue(acting_user, group)
if assigned_to is not None:
result["assignedTo"] = assigned_to
if created:
activity = Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=activity_type,
user=acting_user,
ident=resolution.id if resolution else None,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
issue_resolved.send_robust(
organization_id=organization_id,
user=acting_user or request.user,
group=group,
project=project_lookup[group.project_id],
resolution_type=res_type_str,
sender=update_groups,
)
kick_off_status_syncs.apply_async(
kwargs={"project_id": group.project_id, "group_id": group.id}
)
result.update({"status": "resolved", "statusDetails": status_details})
elif status:
new_status = STATUS_CHOICES[result["status"]]
with transaction.atomic():
happened = queryset.exclude(status=new_status).update(status=new_status)
GroupResolution.objects.filter(group__in=group_ids).delete()
if new_status == GroupStatus.IGNORED:
metrics.incr("group.ignored", skip_internal=True)
ignore_duration = (
statusDetails.pop("ignoreDuration", None)
or statusDetails.pop("snoozeDuration", None)
) or None
ignore_count = statusDetails.pop("ignoreCount", None) or None
ignore_window = statusDetails.pop("ignoreWindow", None) or None
ignore_user_count = statusDetails.pop("ignoreUserCount", None) or None
ignore_user_window = statusDetails.pop("ignoreUserWindow", None) or None
if ignore_duration or ignore_count or ignore_user_count:
if ignore_duration:
ignore_until = timezone.now() + timedelta(minutes=ignore_duration)
else:
ignore_until = None
for group in group_list:
state = {}
if ignore_count and not ignore_window:
state["times_seen"] = group.times_seen
if ignore_user_count and not ignore_user_window:
state["users_seen"] = group.count_users_seen()
GroupSnooze.objects.create_or_update(
group=group,
values={
"until": ignore_until,
"count": ignore_count,
"window": ignore_window,
"user_count": ignore_user_count,
"user_window": ignore_user_window,
"state": state,
"actor_id": request.user.id
if request.user.is_authenticated()
else None,
},
)
result["statusDetails"] = {
"ignoreCount": ignore_count,
"ignoreUntil": ignore_until,
"ignoreUserCount": ignore_user_count,
"ignoreUserWindow": ignore_user_window,
"ignoreWindow": ignore_window,
"actor": serialize(extract_lazy_object(request.user), request.user),
}
else:
GroupSnooze.objects.filter(group__in=group_ids).delete()
ignore_until = None
result["statusDetails"] = {}
else:
result["statusDetails"] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.IGNORED:
activity_type = Activity.SET_IGNORED
activity_data = {
"ignoreCount": ignore_count,
"ignoreDuration": ignore_duration,
"ignoreUntil": ignore_until,
"ignoreUserCount": ignore_user_count,
"ignoreUserWindow": ignore_user_window,
"ignoreWindow": ignore_window,
}
groups_by_project_id = defaultdict(list)
for group in group_list:
groups_by_project_id[group.project_id].append(group)
for project in projects:
project_groups = groups_by_project_id.get(project.id)
if project_groups:
issue_ignored.send_robust(
project=project,
user=acting_user,
group_list=project_groups,
activity_data=activity_data,
sender=update_groups,
)
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity.send_notification()
if new_status == GroupStatus.UNRESOLVED:
kick_off_status_syncs.apply_async(
kwargs={"project_id": group.project_id, "group_id": group.id}
)
if "assignedTo" in result:
assigned_actor = result["assignedTo"]
if assigned_actor:
for group in group_list:
resolved_actor = assigned_actor.resolve()
GroupAssignee.objects.assign(group, resolved_actor, acting_user)
result["assignedTo"] = serialize(
assigned_actor.resolve(), acting_user, ActorSerializer()
)
else:
for group in group_list:
GroupAssignee.objects.deassign(group, acting_user)
is_member_map = {
project.id: project.member_set.filter(user=acting_user).exists() for project in projects
}
if result.get("hasSeen"):
for group in group_list:
if is_member_map.get(group.project_id):
instance, created = create_or_update(
GroupSeen,
group=group,
user=acting_user,
project=project_lookup[group.project_id],
values={"last_seen": timezone.now()},
)
elif result.get("hasSeen") is False:
GroupSeen.objects.filter(group__in=group_ids, user=acting_user).delete()
if result.get("isBookmarked"):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project_lookup[group.project_id], group=group, user=acting_user
)
GroupSubscription.objects.subscribe(
user=acting_user, group=group, reason=GroupSubscriptionReason.bookmark
)
elif result.get("isBookmarked") is False:
GroupBookmark.objects.filter(group__in=group_ids, user=acting_user).delete()
# TODO(dcramer): we could make these more efficient by first
# querying for rich rows are present (if N > 2), flipping the flag
# on those rows, and then creating the missing rows
if result.get("isSubscribed") in (True, False):
is_subscribed = result["isSubscribed"]
for group in group_list:
# NOTE: Subscribing without an initiating event (assignment,
# commenting, etc.) clears out the previous subscription reason
# to avoid showing confusing messaging as a result of this
# action. It'd be jarring to go directly from "you are not
# subscribed" to "you were subscribed due since you were
# assigned" just by clicking the "subscribe" button (and you
# may no longer be assigned to the issue anyway.)
GroupSubscription.objects.create_or_update(
user=acting_user,
group=group,
project=project_lookup[group.project_id],
values={"is_active": is_subscribed, "reason": GroupSubscriptionReason.unknown},
)
result["subscriptionDetails"] = {
"reason": SUBSCRIPTION_REASON_MAP.get(GroupSubscriptionReason.unknown, "unknown")
}
if "isPublic" in result:
# We always want to delete an existing share, because triggering
# an isPublic=True even when it's already public, should trigger
# regenerating.
for group in group_list:
if GroupShare.objects.filter(group=group).delete():
result["shareId"] = None
Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
if result.get("isPublic"):
for group in group_list:
share, created = GroupShare.objects.get_or_create(
project=project_lookup[group.project_id], group=group, user=acting_user
)
if created:
result["shareId"] = share.uuid
Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get("merge") and len(group_list) > 1:
# don't allow merging cross project
if len(projects) > 1:
return Response({"detail": "Merging across multiple projects is not supported"})
group_list_by_times_seen = sorted(
group_list, key=lambda g: (g.times_seen, g.id), reverse=True
)
primary_group, groups_to_merge = group_list_by_times_seen[0], group_list_by_times_seen[1:]
group_ids_to_merge = [g.id for g in groups_to_merge]
eventstream_state = eventstream.start_merge(
primary_group.project_id, group_ids_to_merge, primary_group.id
)
Group.objects.filter(id__in=group_ids_to_merge).update(status=GroupStatus.PENDING_MERGE)
transaction_id = uuid4().hex
merge_groups.delay(
from_object_ids=group_ids_to_merge,
to_object_id=primary_group.id,
transaction_id=transaction_id,
eventstream_state=eventstream_state,
)
Activity.objects.create(
project=project_lookup[primary_group.project_id],
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={"issues": [{"id": c.id} for c in groups_to_merge]},
)
result["merge"] = {
"parent": six.text_type(primary_group.id),
"children": [six.text_type(g.id) for g in groups_to_merge],
}
return Response(result)
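# --------------------------------------------------------------------------
# Editor's illustrative note (not part of the original module): the branches
# above are keyed off fields of the incoming serialized payload, e.g. a bulk
# request whose body carries
#
#     {"isBookmarked": true, "isSubscribed": true, "isPublic": false}
#
# walks the bookmark/subscription/share branches for every selected issue,
# while {"merge": true} takes the merge path, which additionally requires
# that all selected issues belong to a single project.
# --------------------------------------------------------------------------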
| bsd-3-clause | 3,098,362,800,758,515,000 | 39.117249 | 100 | 0.582987 | false |
wintersandroid/sbrick-controller | SBrickCommunications.py | 1 | 17959 | import struct
import threading
import monotonic
import six
from bluepy.btle import Peripheral, BTLEException
from IdleObject import IdleObject
import gi
gi.require_version('Gtk', '3.0')
# noinspection PyUnresolvedReferences,PyPep8
from gi.repository import GObject
class SBrickChannelDrive:
def __init__(self, channel, event_send):
self.channel = channel
self.eventSend = event_send
self.pwm = 0
self.reverse = 0
self.timems = 0
self.timesec = 0
self.braked = False
self.brake_after_time = True
self.timestarted = 0.0
self.stopped = True
self.in_brake_time = False
self.brake_time_sec = 0.0
self.config_id = channel
def set_config_id(self, config_id):
self.config_id = config_id
def is_config_id(self, config_id):
return self.config_id == config_id
def drive(self, pwm, reverse, time, brake_after_time=False):
self.pwm = int(pwm)
if reverse:
self.reverse = 1
else:
self.reverse = 0
self.brake_time_sec = 0
self.braked = False
self.brake_after_time = brake_after_time
self.in_brake_time = False
self.timems = time
self.timesec = 0
if self.timems > 0:
self.timesec = time / 1000.0
self.timestarted = monotonic.monotonic()
def stop(self, braked=False):
self.pwm = 0
self.braked = braked
# print('stop', self.channel, self.braked)
def get_command_drive(self, cmdin):
if not self.in_brake_time:
if not self.braked and (not self.stopped or self.pwm > 0):
self.stopped = self.pwm == 0
# print("drive ", self.channel, self.stopped, self.reverse, self.pwm)
return cmdin + bytearray([self.channel, self.reverse, self.pwm])
return cmdin
def get_command_brake(self, cmdin):
if self.braked and not self.stopped:
self.pwm = 0
self.brake_time_sec = 1.0
self.brake_after_time = False
if not self.in_brake_time:
self.in_brake_time = True
self.timestarted = monotonic.monotonic()
# print("get_command_brake ", self.channel)
return cmdin + bytearray([self.channel])
return cmdin
def get_channel(self):
return self.channel
def set_pwm(self, pwm, change_reverse=False):
if pwm < 0:
if change_reverse:
self.reverse = 1
else:
if change_reverse:
self.reverse = 0
self.pwm = abs(pwm)
def set_reverse(self, reverse):
if reverse:
self.reverse = 1
else:
self.reverse = 0
def decrement_run_timer(self):
if self.timems > 0:
m = monotonic.monotonic()
if m - self.timestarted >= self.timesec:
self.stop(self.brake_after_time)
# print("time ", m - self.timestarted, self.timesec)
self.timems = 0
self.timesec = 0
self.stopped = False
return True
return False
def decrement_brake_timer(self):
if self.brake_time_sec > 0:
m = monotonic.monotonic()
td = m - self.timestarted
# print 'decrement_brake_timer', self.channel, self.brake_time_sec, td
if td >= self.brake_time_sec:
self.stop(False)
# print("brake time ", td, self.timesec)
self.timems = 0
self.timesec = 0
self.brake_time_sec = 0.0
self.in_brake_time = False
return True
return False
class SBrickCommunications(threading.Thread, IdleObject):
def __init__(self, sbrick_addr):
threading.Thread.__init__(self)
IdleObject.__init__(self)
self.lock = threading.RLock()
self.drivingLock = threading.RLock()
self.eventSend = threading.Event()
self.sBrickAddr = sbrick_addr
self.owner_password = None
self.brickChannels = [
SBrickChannelDrive(0, self.eventSend),
SBrickChannelDrive(1, self.eventSend),
SBrickChannelDrive(2, self.eventSend),
SBrickChannelDrive(3, self.eventSend),
]
self.SBrickPeripheral = None
self.stopFlag = False
self.characteristicRemote = None
self.need_authentication = False
self.authenticated = False
self.channel_config_ids = dict()
def set_channel_config_id(self, channel, config_id):
self.channel_config_ids[config_id] = channel
self.brickChannels[channel].set_config_id(config_id)
def terminate(self):
self.stopFlag = True
def is_driving(self):
locked = self.drivingLock.acquire(False)
if locked:
self.drivingLock.release()
return not locked
def connect_to_sbrick(self, owner_password):
self.owner_password = owner_password
self.start()
def run(self):
try:
monotime = 0.0
self.SBrickPeripheral = Peripheral()
self.SBrickPeripheral.connect(self.sBrickAddr)
service = self.SBrickPeripheral.getServiceByUUID('4dc591b0-857c-41de-b5f1-15abda665b0c')
characteristics = service.getCharacteristics('02b8cbcc-0e25-4bda-8790-a15f53e6010f')
for characteristic in characteristics:
if characteristic.uuid == '02b8cbcc-0e25-4bda-8790-a15f53e6010f':
self.characteristicRemote = characteristic
if self.characteristicRemote is None:
return
self.emit('sbrick_connected')
self.need_authentication = self.get_need_authentication()
self.authenticated = not self.need_authentication
if self.need_authentication:
                if self.owner_password is not None:
                    self.authenticate_owner(self.owner_password)
while not self.stopFlag:
if self.authenticated:
if monotonic.monotonic() - monotime >= 0.05:
self.send_command()
monotime = monotonic.monotonic()
self.eventSend.wait(0.01)
for channel in self.brickChannels:
if channel.decrement_run_timer():
monotime = 0.0
self.drivingLock.release()
# print("stop run normal")
self.emit("sbrick_channel_stop", channel.channel)
if channel.decrement_brake_timer():
self.drivingLock.release()
# print("stop brake timer")
monotime = 0.0
self.emit("sbrick_channel_stop", channel.channel)
if self.authenticated:
self.stop_all()
self.send_command()
self.SBrickPeripheral.disconnect()
self.emit('sbrick_disconnected_ok')
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_channel(self, channel):
if isinstance(channel, six.integer_types):
return self.brickChannels[channel]
if isinstance(channel, six.string_types):
return self.brickChannels[self.channel_config_ids[channel]]
return None
def drive(self, channel, pwm, reverse, time, brake_after_time=False):
with self.lock:
ch = self.get_channel(channel)
if ch is not None:
ch.drive(pwm, reverse, time, brake_after_time)
self.emit("sbrick_drive_sent", ch.channel, time)
self.eventSend.set()
def stop(self, channel, braked=False):
with self.lock:
ch = self.get_channel(channel)
if ch is not None:
ch.stop(braked)
self.emit("sbrick_drive_sent", ch.channel, -2)
self.eventSend.set()
def stop_all(self):
with self.lock:
for channel in self.brickChannels:
channel.stop()
self.eventSend.set()
def change_pwm(self, channel, pwm, change_reverse=False):
with self.lock:
ch = self.get_channel(channel)
if ch is not None:
ch.set_pwm(pwm, change_reverse)
self.eventSend.set()
def change_reverse(self, channel, reverse):
with self.lock:
ch = self.get_channel(channel)
if ch is not None:
ch.set_reverse(reverse)
self.eventSend.set()
def send_command(self):
with self.lock:
# try:
drivecmd = bytearray([0x01])
brakecmd = bytearray([0x00])
for channel in self.brickChannels:
drivecmd = channel.get_command_drive(drivecmd)
brakecmd = channel.get_command_brake(brakecmd)
if len(drivecmd) > 1:
self.drivingLock.acquire()
self.characteristicRemote.write(drivecmd, True)
self.print_hex_string("drive sent", drivecmd)
if len(brakecmd) > 1:
self.characteristicRemote.write(brakecmd, True)
self.print_hex_string("brake sent", brakecmd)
# return True
# except Exception as ex:
# self.emit("sbrick_disconnected_error",ex.message)
# return False
def disconnect_sbrick(self):
with self.lock:
self.stopFlag = True
@staticmethod
def print_hex_string(what, strin):
out = what + " -> "
for chrx in strin:
out = "%s %0X" % (out, chrx)
print(out)
def get_voltage(self):
with self.lock:
try:
self.characteristicRemote.write(b"\x0f\x00")
value = self.characteristicRemote.read()
valueint = struct.unpack("<H", value)[0]
return (valueint * 0.83875) / 2047.0
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_temperature(self):
with self.lock:
try:
self.characteristicRemote.write(b"\x0f\x0e")
value = self.characteristicRemote.read()
valueint = struct.unpack("<H", value)[0]
return valueint / 118.85795 - 160
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_thermal_limit(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x15')
value = self.characteristicRemote.read()
valueint = struct.unpack("<H", value)[0]
return valueint / 118.85795 - 160
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_watchdog_timeout(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x0e')
value = self.characteristicRemote.read()
return struct.unpack("<B", value)[0] * 0.1
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_authentication_timeout(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x09')
value = self.characteristicRemote.read()
return struct.unpack("<B", value)[0] * 0.1
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_power_cycle_counter(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x28')
value = self.characteristicRemote.read()
return struct.unpack("<I", value)[0]
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_uptime(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x29')
value = self.characteristicRemote.read()
seconds = struct.unpack("<I", value)[0] * 0.1
minutes = seconds // 60
hours = minutes // 60
return "%02d:%02d:%02d" % (hours, minutes % 60, seconds % 60)
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_hardware_version(self):
try:
return self.SBrickPeripheral.readCharacteristic(0x000c).decode("utf-8")
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_software_version(self):
try:
return self.SBrickPeripheral.readCharacteristic(0x000a).decode("utf-8")
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_brick_id(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x0a')
value = self.characteristicRemote.read()
return "%0X %0X %0X %0X %0X %0X" % (
value[0], value[1], value[2], value[3], value[4], value[5])
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_need_authentication(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x02')
value = self.characteristicRemote.read()
return struct.unpack("<B", value)[0] == 1
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_is_authenticated(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x03')
value = self.characteristicRemote.read()
return struct.unpack("<B", value)[0] == 1
except BTLEException as ex:
self.emit("sbrick_disconnected_error", ex.message)
def get_user_id(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x04')
value = self.characteristicRemote.read()
return struct.unpack("<B", value)[0] == 1
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def authenticate_owner(self, password):
with self.lock:
try:
self.authenticated = False
cmd = bytearray([0x05, 0x00])
for ch in password:
cmd.append(ord(ch))
self.characteristicRemote.write(cmd)
self.authenticated = True
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def authenticate_guest(self, password):
with self.lock:
try:
self.authenticated = False
cmd = bytearray([0x05, 0x01])
for ch in password:
cmd.append(ord(ch))
self.characteristicRemote.write(cmd)
self.authenticated = True
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def clear_owner_password(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x06\x00')
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def clear_guest_password(self):
with self.lock:
try:
self.characteristicRemote.write(b'\x06\x01')
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def set_owner_password(self, password):
with self.lock:
try:
cmd = bytearray([0x07, 0x00])
for ch in password:
cmd.append(ord(ch))
self.characteristicRemote.write(cmd)
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def set_guest_password(self, password):
with self.lock:
try:
cmd = bytearray([0x07, 0x01])
for ch in password:
cmd.append(ord(ch))
self.characteristicRemote.write(cmd)
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
def set_authentication_timeout(self, seconds):
with self.lock:
try:
cmd = bytearray([0x08, seconds / 0.1])
self.characteristicRemote.write(cmd)
except BTLEException as ex:
self.emit("sbrick_error", ex.message)
GObject.type_register(SBrickCommunications)
GObject.signal_new("sbrick_connected", SBrickCommunications, GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE, ())
GObject.signal_new("sbrick_disconnected_error", SBrickCommunications, GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE,
[GObject.TYPE_STRING])
GObject.signal_new("sbrick_disconnected_ok", SBrickCommunications, GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE, ())
GObject.signal_new("sbrick_channel_stop", SBrickCommunications, GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE,
[GObject.TYPE_INT])
GObject.signal_new("sbrick_drive_sent", SBrickCommunications, GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE,
[GObject.TYPE_INT, GObject.TYPE_INT])
GObject.signal_new("sbrick_error", SBrickCommunications, GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE,
[GObject.TYPE_STRING])
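# --------------------------------------------------------------------------
# Example usage (editor's sketch, not part of the original module). The
# Bluetooth address below is a placeholder, and a GLib main loop is assumed
# because the signals registered above are meant for a GTK application.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    from gi.repository import GLib

    sbrick = SBrickCommunications("00:07:80:AA:BB:CC")  # placeholder address

    def on_connected(obj):
        # channel 0, ~50% power, forward, run for 2000 ms, brake afterwards
        sbrick.drive(0, 128, False, 2000, brake_after_time=True)

    def on_error(obj, message):
        print("SBrick error: %s" % message)

    sbrick.connect("sbrick_connected", on_connected)
    sbrick.connect("sbrick_disconnected_error", on_error)
    sbrick.connect_to_sbrick(None)  # None: SBrick with no owner password set
    GLib.MainLoop().run()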
| apache-2.0 | 3,421,896,256,226,806,300 | 35.725971 | 119 | 0.556378 | false |
howard11/autodownload | schedule.download.py | 1 | 1520 | #!/usr/bin/python3
import sys, os, re
import dao, constants, zimuzu, boxdownload
from requests import Request, Session
sys.stdout = open("/root/autodownloadjob/schedule.download.log", encoding='utf-8', mode='w')
sys.stderr = open("/root/autodownloadjob/schedule.download.error.log", encoding='utf-8', mode='w')
def download_for_zimuzu(tv):
print('[%s] from S%sE%s' % (tv['name'], tv['start_season'], tv['start_episode']))
series_list = zimuzu.fetch_all_series_for_zimuzu(tv)
headers = boxdownload.find_box_http_header()
if not series_list or not headers:
return
for series in series_list:
groups = re.compile(r'\.S(\d{1,3}).*E(\d{1,3})\.').search(series['title'])
if not groups:
continue
season = groups.group(1)
episode = groups.group(2)
if(int(tv['start_season']) > int(season) or int(tv['start_episode']) >= int(episode)):
continue
print(' - Add S%sE%s' % (season, episode))
if boxdownload.send_download_link_to_box(headers, series['link']):
dao.save_series(tv['id'], season, episode, series['link'], series['title'], constants.DOWLOADING_FLAG)
dao.update_tv_series_progress(tv['id'], season, episode)
def main():
if not os.path.isfile(constants.sqlite_file):
dao.create_db()
else:
print('Using database: %s' % constants.sqlite_file)
tv_list = dao.list_tv()
if not tv_list:
print('No TV found in db')
return
else:
print('Checking %s TVs' % len(tv_list))
for tv in tv_list:
if tv and tv['url'] and 'zimuzu.tv' in tv['url']:
download_for_zimuzu(tv)
main()
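# Editor's note (illustrative): this script is written to run unattended,
# e.g. from cron, with its output redirected to the log files opened above:
#
#     */30 * * * * /usr/bin/python3 /root/autodownloadjob/schedule.download.py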
| mit | -1,746,285,889,057,662,200 | 30.666667 | 105 | 0.676316 | false |
weso/landportal-importers | LandPortalEntities/lpentities/observation.py | 1 | 2674 | '''
Created on 18/12/2013
@author: Nacho
'''
from lpentities.computation import Computation
from lpentities.dataset import Dataset
from lpentities.indicator import Indicator
from lpentities.value import Value
class Observation(object):
'''
classdocs
'''
def __init__(self, chain_for_id, int_for_id, ref_time=None, issued=None,
computation=None, value=None, indicator=None, dataset=None):
'''
Constructor
'''
self.ref_time = ref_time
self.issued = issued
self._computation = computation
self._value = value
self._indicator = indicator
self._dataset = dataset
self.group = None
self.indicator_group = None
self.observation_id = self._generate_id(chain_for_id, int_for_id)
def __get_computation(self):
return self._computation
def __set_computation(self, computation):
if isinstance(computation, Computation) :
self._computation = computation
else:
raise ValueError("Expected Computation object in Observation")
computation = property(fget=__get_computation,
fset=__set_computation,
doc="The computation for the observation")
def __get_value(self):
return self._value
def __set_value(self, value):
if isinstance(value, Value) :
self._value = value
else:
raise ValueError("Expected Value object in Observation")
value = property(fget=__get_value,
fset=__set_value,
doc="The value for the observation")
def __get_indicator(self):
return self._indicator
def __set_indicator(self, indicator):
if isinstance(indicator, Indicator) :
self._indicator = indicator
else:
raise ValueError("Expected Indicator object in Observation")
indicator = property(fget=__get_indicator,
fset=__set_indicator,
doc="The indicator for the observation")
def __get_dataset(self):
return self._dataset
def __set_dataset(self, dataset):
if isinstance(dataset, Dataset) :
self._dataset = dataset
else:
raise ValueError("Expected Dataset object in Observation")
dataset = property(fget=__get_dataset,
fset=__set_dataset,
doc="The dataset for the observation")
@staticmethod
def _generate_id(chain_for_id, int_for_id):
return "OBS" + chain_for_id.upper() + str(int_for_id).upper() | unlicense | 7,113,201,815,411,518,000 | 29.05618 | 77 | 0.582274 | false |
RemiFr82/ck_addons | ck_equipment/models/eqpt_trailer.py | 1 | 1582 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
TRAILER_TYPES = [('b','B'),
('be','BE')]
class Trailer(models.Model):
_name = 'eqpt.trailer'
_description = "Trailer equipment"
_inherits = {'eqpt.equipment':'eqpt_id'}
eqpt_id = fields.Many2one('eqpt.equipment')
    eqpt_type = fields.Selection(selection=TRAILER_TYPES, string="Trailer type")
has_own_np = fields.Boolean(string="Has its own registration number")
numberplate = fields.Char(string="Numberplate")
owner_id = fields.Many2one('res.partner', string="Owner")
max_load = fields.Integer(string="Max Load (in kg)")
length = fields.Float(string="Length (in m)", digits=(1,2))
tires_ref = fields.Char(string="Tires reference")
tires_load = fields.Float(string="Tires pressure (in bar)", digits=(1,1), default=0.0)
crossbars = fields.Integer(string="Number of crossbars", default=2)
boats_per_bar = fields.Integer(string="Boats per bar", default=1)
max_boats = fields.Integer(string="Theoric max boats", compute="get_max_boats", store=True)
has_case = fields.Boolean(string="Has a case ?", default=False)
case_capacity = fields.Char(string="Case capacity")
vehicle_ids = fields.Many2many('eqpt.vehicle', string="Trailing vehicles")
_order = "max_load, max_boats desc"
@api.one
@api.depends('crossbars','boats_per_bar')
def get_max_boats(self):
if self.crossbars and self.boats_per_bar:
self.max_boats = self.crossbars * self.boats_per_bar
else:
self.max_boats = 0
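    # Editor's note (illustrative): with the compute above, a trailer with
    # crossbars=3 and boats_per_bar=4 stores max_boats=12; if either field
    # is unset the total falls back to 0.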
| gpl-3.0 | -8,480,008,855,262,436,000 | 36.666667 | 95 | 0.654867 | false |
skosukhin/spack | var/spack/repos/builtin/packages/llvm/package.py | 1 | 19620 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Llvm(CMakePackage):
"""The LLVM Project is a collection of modular and reusable compiler and
toolchain technologies. Despite its name, LLVM has little to do
with traditional virtual machines, though it does provide helpful
libraries that can be used to build them. The name "LLVM" itself
is not an acronym; it is the full name of the project.
"""
homepage = 'http://llvm.org/'
url = 'http://llvm.org/releases/3.7.1/llvm-3.7.1.src.tar.xz'
list_url = 'http://releases.llvm.org/download.html'
family = 'compiler' # Used by lmod
# currently required by mesa package
version('3.0', 'a8e5f5f1c1adebae7b4a654c376a6005',
url='http://llvm.org/releases/3.0/llvm-3.0.tar.gz')
# NOTE: The debug version of LLVM is an order of magnitude larger than
# the release version, and may take up 20-30 GB of space. If you want
# to save space, build with `build_type=Release`.
variant('clang', default=True,
description="Build the LLVM C/C++/Objective-C compiler frontend")
variant('lldb', default=True, description="Build the LLVM debugger")
variant('internal_unwind', default=True,
description="Build the libcxxabi libunwind")
variant('polly', default=True,
description="Build the LLVM polyhedral optimization plugin, "
"only builds for 3.7.0+")
variant('libcxx', default=True,
description="Build the LLVM C++ standard library")
variant('compiler-rt', default=True,
description="Build LLVM compiler runtime, including sanitizers")
variant('gold', default=True,
description="Add support for LTO with the gold linker plugin")
variant('shared_libs', default=False,
description="Build all components as shared libraries, faster, "
"less memory to build, less stable")
variant('link_dylib', default=False,
description="Build and link the libLLVM shared library rather "
"than static")
variant('all_targets', default=True,
description="Build all supported targets, default targets "
"<current arch>,NVPTX,AMDGPU,CppBackend")
variant('build_type', default='Release',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
variant('python', default=False, description="Install python bindings")
extends('python', when='+python')
# Build dependency
depends_on('[email protected]:', type='build')
# Universal dependency
depends_on('[email protected]:2.8') # Seems not to support python 3.X.Y
depends_on('py-lit', type=('build', 'run'))
# lldb dependencies
depends_on('ncurses', when='+lldb')
depends_on('swig', when='+lldb')
depends_on('libedit', when='+lldb')
# gold support
depends_on('binutils+gold', when='+gold')
# polly plugin
depends_on('gmp', when='@:3.6.999 +polly')
depends_on('isl', when='@:3.6.999 +polly')
base_url = 'http://llvm.org/releases/%%(version)s/%(pkg)s-%%(version)s.src.tar.xz'
llvm_url = base_url % {'pkg': 'llvm'}
resources = {
'compiler-rt': {
'url': base_url % {'pkg': 'compiler-rt'},
'destination': 'projects',
'placement': 'compiler-rt',
'variant': '+compiler-rt',
},
'openmp': {
'url': base_url % {'pkg': 'openmp'},
'destination': 'projects',
'placement': 'openmp',
'variant': '+clang',
},
'libcxx': {
'url': base_url % {'pkg': 'libcxx'},
'destination': 'projects',
'placement': 'libcxx',
'variant': '+libcxx',
},
'libcxxabi': {
'url': base_url % {'pkg': 'libcxxabi'},
'destination': 'projects',
'placement': 'libcxxabi',
'variant': '+libcxx',
},
'cfe': {
'url': base_url % {'pkg': 'cfe'},
'destination': 'tools',
'placement': 'clang',
'variant': '+clang',
},
'clang-tools-extra': {
'url': base_url % {'pkg': 'clang-tools-extra'},
'destination': 'tools/clang/tools',
'placement': 'extra',
'variant': '+clang',
},
'lldb': {
'url': base_url % {'pkg': 'lldb'},
'destination': 'tools',
'placement': 'lldb',
'variant': '+lldb',
},
'polly': {
'url': base_url % {'pkg': 'polly'},
'destination': 'tools',
'placement': 'polly',
'variant': '+polly',
},
'libunwind': {
'url': base_url % {'pkg': 'libunwind'},
'destination': 'projects',
'placement': 'libunwind',
'variant': '+internal_unwind',
},
}
releases = [
{
'version': 'trunk',
'repo': 'http://llvm.org/svn/llvm-project/llvm/trunk',
'resources': {
'compiler-rt': 'http://llvm.org/svn/llvm-project/compiler-rt/trunk',
'openmp': 'http://llvm.org/svn/llvm-project/openmp/trunk',
'polly': 'http://llvm.org/svn/llvm-project/polly/trunk',
'libcxx': 'http://llvm.org/svn/llvm-project/libcxx/trunk',
'libcxxabi': 'http://llvm.org/svn/llvm-project/libcxxabi/trunk',
'cfe': 'http://llvm.org/svn/llvm-project/cfe/trunk',
'clang-tools-extra': 'http://llvm.org/svn/llvm-project/clang-tools-extra/trunk',
'lldb': 'http://llvm.org/svn/llvm-project/lldb/trunk',
'libunwind': 'http://llvm.org/svn/llvm-project/libunwind/trunk',
}
},
{
'version': '5.0.0',
'md5': '5ce9c5ad55243347ea0fdb4c16754be0',
'resources': {
'compiler-rt': 'da735894133589cbc6052c8ef06b1230',
'openmp': '8be33c0f0a7ed3aab42be2f63988913d',
'polly': 'dcbd08450e895a42f3986e2fe6524c92',
'libcxx': 'a39241a3c9b4d2b7ce1246b9f527b400',
'libcxxabi': '0158528a5a0ae1c74821bae2195ea782',
'cfe': '699c448c6d6d0edb693c87beb1cc8c6e',
'clang-tools-extra': '0cda05d1a61becb393eb63746963d7f5',
'lldb': '8de19973d044ca2cfe325d4625a5cfef',
'libunwind': '98fb2c677068c6f36727fb1d5397bca3',
}
},
{
'version': '4.0.1',
'md5': 'a818e70321b91e2bb2d47e60edd5408f',
'resources': {
'compiler-rt': '0227ac853ce422125f8bb08f6ad5c995',
'openmp': '23e5f720ae119165ba32627467fdc885',
'polly': '0d4a3fa2eb446a378bbf01b220851b1f',
'libcxx': 'c54f7938e2f393a2cead0af37ed99dfb',
'libcxxabi': '55ba0be7daf8bf25ab629a9cfd3022a4',
'cfe': 'a6c7b3e953f8b93e252af5917df7db97',
'clang-tools-extra': 'cfd46027a0ab7eed483dfcc803e86bd9',
'lldb': '908bdd777d3b527a914ba360477b8ab3',
'libunwind': 'b72ec95fb784e61f15d6196414b92f5e',
}
},
{
'version': '4.0.0',
'md5': 'ea9139a604be702454f6acf160b4f3a2',
'resources': {
'compiler-rt': '2ec11fb7df827b086341131c5d7f1814',
'openmp': '3d06d2801dd4808f551a1a70068e01f5',
'polly': 'f36e4e7cf872f8b3bbb9cdcddc5fd964',
'libcxx': '4cf7df466e6f803ec4611ee410ff6781',
'libcxxabi': '8b5d7b9bfcf7dec2dc901c8a6746f97c',
'cfe': '756e17349fdc708c62974b883bf72d37',
'clang-tools-extra': '99e711337ec3e9a8bb36e8dd62b2cd6e',
'lldb': 'bd41ba7fcca55d2a554409bbccd34d2d',
'libunwind': '0c3534eaa11c0cae33a1dcf5f36ce287',
}
},
{
'version': '3.9.1',
'md5': '3259018a7437e157f3642df80f1983ea',
'resources': {
'compiler-rt': 'aadc76e7e180fafb10fb729444e287a3',
'openmp': 'f076916bf2f49229b4df9fa0bb002599',
'polly': '2cc7fe2bd9539775ba140abfd375bec6',
'libcxx': '75a3214224301fc543fa6a38bdf7efe0',
'libcxxabi': '62fd584b38cc502172c2ffab041b5fcc',
'cfe': '45713ec5c417ed9cad614cd283d786a1',
'clang-tools-extra': '1a01d545a064fcbc46a2f05f6880d3d7',
'lldb': '91399402f287d3f637db1207113deecb',
'libunwind': 'f273dd0ed638ad0601b23176a36f187b',
}
},
{
'version': '3.9.0',
'md5': 'f2093e98060532449eb7d2fcfd0bc6c6',
'resources': {
'compiler-rt': 'b7ea34c9d744da16ffc0217b6990d095',
'openmp': '5390164f2374e1444e82393541ecf6c7',
'polly': '1cf328cbae25267749b68cfa6f113674',
'libcxx': '0a11efefd864ce6f321194e441f7e569',
'libcxxabi': 'd02642308e22e614af6b061b9b4fedfa',
'cfe': '29e1d86bee422ab5345f5e9fb808d2dc',
'clang-tools-extra': 'f4f663068c77fc742113211841e94d5e',
'lldb': '968d053c3c3d7297983589164c6999e9',
'libunwind': '3e5c87c723a456be599727a444b1c166',
}
},
{
'version': '3.8.1',
'md5': '538467e6028bbc9259b1e6e015d25845',
'resources': {
'compiler-rt': 'f140db073d2453f854fbe01cc46f3110',
'openmp': '078b8d4c51ad437a4f8b5989f5ec4156',
'polly': '8a40e697a4ba1c8b640b85d074bd6e25',
'libcxx': '1bc60150302ff76a0d79d6f9db22332e',
'libcxxabi': '3c63b03ba2f30a01279ca63384a67773',
'cfe': '4ff2f8844a786edb0220f490f7896080',
'clang-tools-extra': '6e49f285d0b366cc3cab782d8c92d382',
'lldb': '9e4787b71be8e432fffd31e13ac87623',
'libunwind': 'd66e2387e1d37a8a0c8fe6a0063a3bab',
}
},
{
'version': '3.8.0',
'md5': '07a7a74f3c6bd65de4702bf941b511a0',
'resources': {
'compiler-rt': 'd6fcbe14352ffb708e4d1ac2e48bb025',
'openmp': '8fd7cc35d48051613cf1e750e9f22e40',
'polly': '1b3b20f52d34a4024e21a4ea7112caa7',
'libcxx': 'd6e0bdbbee39f7907ad74fd56d03b88a',
'libcxxabi': 'bbe6b4d72c7c5978550d370af529bcf7',
'cfe': 'cc99e7019bb74e6459e80863606250c5',
'clang-tools-extra': 'c2344f50e0eea0b402f0092a80ddc036',
'lldb': 'a5da35ed9cc8c8817ee854e3dbfba00e',
'libunwind': '162ade468607f153cca12be90b5194fa',
}
},
{
'version': '3.7.1',
'md5': 'bf8b3a2c79e61212c5409041dfdbd319',
'resources': {
'compiler-rt': '1c6975daf30bb3b0473b53c3a1a6ff01',
'openmp': 'b4ad08cda4e5c22e42b66062b140438e',
'polly': '3a2a7367002740881637f4d47bca4dc3',
'libcxx': 'f9c43fa552a10e14ff53b94d04bea140',
'libcxxabi': '52d925afac9f97e9dcac90745255c169',
'cfe': '0acd026b5529164197563d135a8fd83e',
'clang-tools-extra': '5d49ff745037f061a7c86aeb6a24c3d2',
'lldb': 'a106d8a0d21fc84d76953822fbaf3398',
'libunwind': '814bd52c9247c5d04629658fbcb3ab8c',
}
},
{
'version': '3.7.0',
'md5': 'b98b9495e5655a672d6cb83e1a180f8e',
'resources': {
'compiler-rt': '383c10affd513026f08936b5525523f5',
'openmp': 'f482c86fdead50ba246a1a2b0bbf206f',
'polly': '32f93ffc9cc7e042df22089761558f8b',
'libcxx': '46aa5175cbe1ad42d6e9c995968e56dd',
'libcxxabi': '5aa769e2fca79fa5335cfae8f6258772',
'cfe': '8f9d27335e7331cf0a4711e952f21f01',
'clang-tools-extra': 'd5a87dacb65d981a427a536f6964642e',
'lldb': 'e5931740400d1dc3e7db4c7ba2ceff68',
'libunwind': '9a75392eb7eb8ed5c0840007e212baf5',
}
},
{
'version': '3.6.2',
'md5': '0c1ee3597d75280dee603bae9cbf5cc2',
'resources': {
'compiler-rt': 'e3bc4eb7ba8c39a6fe90d6c988927f3c',
'openmp': '65dd5863b9b270960a96817e9152b123',
'libcxx': '22214c90697636ef960a49aef7c1823a',
'libcxxabi': '17518e361e4e228f193dd91e8ef54ba2',
'cfe': 'ff862793682f714bb7862325b9c06e20',
'clang-tools-extra': '3ebc1dc41659fcec3db1b47d81575e06',
'lldb': '51e5eb552f777b950bb0ff326e60d5f0',
}
},
{
'version': '3.5.1',
'md5': '2d3d8004f38852aa679e5945b8ce0b14',
'resources': {
'compiler-rt': 'd626cfb8a9712cb92b820798ab5bc1f8',
'openmp': '121ddb10167d7fc38b1f7e4b029cf059',
'libcxx': '406f09b1dab529f3f7879f4d548329d2',
'libcxxabi': 'b22c707e8d474a99865ad3c521c3d464',
'cfe': '93f9532f8f7e6f1d8e5c1116907051cb',
'clang-tools-extra': 'f13f31ed3038acadc6fa63fef812a246',
'lldb': 'cc5ea8a414c62c33e760517f8929a204',
}
},
]
for release in releases:
if release['version'] == 'trunk':
version(release['version'], svn=release['repo'])
for name, repo in release['resources'].items():
resource(name=name,
svn=repo,
destination=resources[name]['destination'],
when='@%s%s' % (release['version'],
resources[name].get('variant', "")),
placement=resources[name].get('placement', None))
else:
version(release['version'], release['md5'], url=llvm_url % release)
for name, md5 in release['resources'].items():
resource(name=name,
url=resources[name]['url'] % release,
md5=md5,
destination=resources[name]['destination'],
when='@%s%s' % (release['version'],
resources[name].get('variant', "")),
placement=resources[name].get('placement', None))
conflicts('+clang_extra', when='~clang')
conflicts('+lldb', when='~clang')
# Github issue #4986
patch('llvm_gcc7.patch', when='@4.0.0:4.0.1+lldb %[email protected]:')
def setup_environment(self, spack_env, run_env):
spack_env.append_flags('CXXFLAGS', self.compiler.cxx11_flag)
def cmake_args(self):
spec = self.spec
cmake_args = [
'-DLLVM_REQUIRES_RTTI:BOOL=ON',
'-DCLANG_DEFAULT_OPENMP_RUNTIME:STRING=libomp',
'-DPYTHON_EXECUTABLE:PATH={0}'.format(spec['python'].command.path),
]
if '+gold' in spec:
cmake_args.append('-DLLVM_BINUTILS_INCDIR=' +
spec['binutils'].prefix.include)
if '+polly' in spec:
cmake_args.append('-DLINK_POLLY_INTO_TOOLS:Bool=ON')
else:
cmake_args.extend(['-DLLVM_EXTERNAL_POLLY_BUILD:Bool=OFF',
'-DLLVM_TOOL_POLLY_BUILD:Bool=OFF',
'-DLLVM_POLLY_BUILD:Bool=OFF',
'-DLLVM_POLLY_LINK_INTO_TOOLS:Bool=OFF'])
if '+clang' not in spec:
cmake_args.append('-DLLVM_EXTERNAL_CLANG_BUILD:Bool=OFF')
if '+lldb' not in spec:
cmake_args.extend(['-DLLVM_EXTERNAL_LLDB_BUILD:Bool=OFF',
'-DLLVM_TOOL_LLDB_BUILD:Bool=OFF'])
if '+internal_unwind' not in spec:
cmake_args.append('-DLLVM_EXTERNAL_LIBUNWIND_BUILD:Bool=OFF')
if '+libcxx' in spec:
if spec.satisfies('@3.9.0:'):
cmake_args.append('-DCLANG_DEFAULT_CXX_STDLIB=libc++')
else:
cmake_args.append('-DLLVM_EXTERNAL_LIBCXX_BUILD:Bool=OFF')
cmake_args.append('-DLLVM_EXTERNAL_LIBCXXABI_BUILD:Bool=OFF')
if '+compiler-rt' not in spec:
cmake_args.append('-DLLVM_EXTERNAL_COMPILER_RT_BUILD:Bool=OFF')
if '+shared_libs' in spec:
cmake_args.append('-DBUILD_SHARED_LIBS:Bool=ON')
if '+link_dylib' in spec:
cmake_args.append('-DLLVM_LINK_LLVM_DYLIB:Bool=ON')
if '+all_targets' not in spec: # all is default on cmake
targets = ['CppBackend', 'NVPTX', 'AMDGPU']
if 'x86' in spec.architecture.target.lower():
targets.append('X86')
elif 'arm' in spec.architecture.target.lower():
targets.append('ARM')
elif 'aarch64' in spec.architecture.target.lower():
targets.append('AArch64')
elif 'sparc' in spec.architecture.target.lower():
targets.append('sparc')
elif ('ppc' in spec.architecture.target.lower() or
'power' in spec.architecture.target.lower()):
targets.append('PowerPC')
cmake_args.append(
'-DLLVM_TARGETS_TO_BUILD:Bool=' + ';'.join(targets))
if spec.satisfies('@4.0.0:') and spec.satisfies('platform=linux'):
cmake_args.append('-DCMAKE_BUILD_WITH_INSTALL_RPATH=1')
return cmake_args
@run_before('build')
def pre_install(self):
with working_dir(self.build_directory):
# When building shared libraries these need to be installed first
make('install-LLVMTableGen')
make('install-LLVMDemangle')
make('install-LLVMSupport')
@run_after('install')
def post_install(self):
if '+clang' in self.spec and '+python' in self.spec:
install_tree(
'tools/clang/bindings/python/clang',
join_path(site_packages_dir, 'clang'))
with working_dir(self.build_directory):
install_tree('bin', join_path(self.prefix, 'libexec', 'llvm'))
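# Editor's note (illustrative only): with this package file a user would
# typically request a build with a spec along the lines of
#
#     spack install llvm@4.0.1 +clang +lldb ~libcxx build_type=Release
#
# where each variant maps onto the CMake flags assembled in cmake_args().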
| lgpl-2.1 | 9,089,586,911,029,377,000 | 42.892617 | 96 | 0.558818 | false |
HybridF5/jacket | jacket/tests/storage/unit/test_volume_configuration.py | 1 | 2008 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the configuration wrapper in volume drivers."""
from oslo_config import cfg
from jacket.storage import test
from jacket.storage.volume import configuration
volume_opts = [
cfg.StrOpt('str_opt', default='STR_OPT'),
cfg.BoolOpt('bool_opt', default=False)
]
more_volume_opts = [
cfg.IntOpt('int_opt', default=1),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
CONF.register_opts(more_volume_opts)
class VolumeConfigurationTest(test.TestCase):
def test_group_grafts_opts(self):
c = configuration.Configuration(volume_opts, config_group='foo')
self.assertEqual(c.str_opt, CONF.foo.str_opt)
self.assertEqual(c.bool_opt, CONF.foo.bool_opt)
def test_opts_no_group(self):
c = configuration.Configuration(volume_opts)
self.assertEqual(c.str_opt, CONF.str_opt)
self.assertEqual(c.bool_opt, CONF.bool_opt)
def test_grafting_multiple_opts(self):
c = configuration.Configuration(volume_opts, config_group='foo')
c.append_config_values(more_volume_opts)
self.assertEqual(c.str_opt, CONF.foo.str_opt)
self.assertEqual(c.bool_opt, CONF.foo.bool_opt)
self.assertEqual(c.int_opt, CONF.foo.int_opt)
def test_safe_get(self):
c = configuration.Configuration(volume_opts, config_group='foo')
self.assertIsNone(c.safe_get('none_opt'))
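# Editor's illustrative sketch (the driver below is an assumption, not part
# of these tests): a volume driver consumes the wrapper exactly as exercised
# above, e.g.
#
#     conf = configuration.Configuration(volume_opts, config_group='backend1')
#     conf.append_config_values(more_volume_opts)
#     timeout = conf.safe_get('int_opt')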
| apache-2.0 | -9,137,915,361,235,510,000 | 33.033898 | 78 | 0.701195 | false |
stepbot/resiliant-trader | robinhood.py | 1 | 12260 | ##############################
# Robinhood API based on https://github.com/Jamonek/Robinhood.git
# refence available at https://github.com/sanko/Robinhood.git
##############################
import getpass
import warnings
import requests
from datetime import datetime
import numpy as np
class Robinhood:
"""wrapper class for fetching/parsing Robinhood endpoints"""
endpoints = {
"login": "https://api.robinhood.com/api-token-auth/",
"logout": "https://api.robinhood.com/api-token-logout/",
"investment_profile": "https://api.robinhood.com/user/investment_profile/",
"accounts": "https://api.robinhood.com/accounts/",
"ach_iav_auth": "https://api.robinhood.com/ach/iav/auth/",
"ach_relationships": "https://api.robinhood.com/ach/relationships/",
"ach_transfers": "https://api.robinhood.com/ach/transfers/",
"applications": "https://api.robinhood.com/applications/",
"dividends": "https://api.robinhood.com/dividends/",
"edocuments": "https://api.robinhood.com/documents/",
"instruments": "https://api.robinhood.com/instruments/",
"margin_upgrades": "https://api.robinhood.com/margin/upgrades/",
"markets": "https://api.robinhood.com/markets/",
"notifications": "https://api.robinhood.com/notifications/",
"orders": "https://api.robinhood.com/orders/",
"password_reset": "https://api.robinhood.com/password_reset/request/",
"portfolios": "https://api.robinhood.com/portfolios/",
"positions": "https://api.robinhood.com/positions/",
"quotes": "https://api.robinhood.com/quotes/",
"historicals": "https://api.robinhood.com/quotes/historicals/",
"document_requests": "https://api.robinhood.com/upload/document_requests/",
"user": "https://api.robinhood.com/user/",
"watchlists": "https://api.robinhood.com/watchlists/",
"news": "https://api.robinhood.com/midlands/news/",
"fundamentals": "https://api.robinhood.com/fundamentals/",
}
session = None
username = None
password = None
headers = None
auth_token = None
##############################
#Logging in and initializing
##############################
def __init__(self):
self.session = requests.session()
self.headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)"
}
self.session.headers = self.headers
def login_prompt(self): #pragma: no cover
"""Prompts user for username and password and calls login()."""
username = input("Username: ")
password = getpass.getpass()
return self.login(username=username, password=password)
def login(
self,
username,
password,
mfa_code=None
):
"""save and test login info for Robinhood accounts
Args:
username (str): username
password (str): password
Returns:
(bool): received valid auth token
"""
self.username = username
self.password = password
payload = {
'password': self.password,
'username': self.username
}
if mfa_code:
payload['mfa_code'] = mfa_code
try:
res = self.session.post(
self.endpoints['login'],
data=payload
)
res.raise_for_status()
data = res.json()
except requests.exceptions.HTTPError:
raise RH_exception.LoginFailed()
if 'mfa_required' in data.keys(): #pragma: no cover
raise RH_exception.TwoFactorRequired() #requires a second call to enable 2FA
if 'token' in data.keys():
self.auth_token = data['token']
self.headers['Authorization'] = 'Token ' + self.auth_token
return True
return False
def logout(self):
"""logout from Robinhood
Returns:
(:obj:`requests.request`) result from logout endpoint
"""
flag = False
try:
req = self.session.post(self.endpoints['logout'])
req.raise_for_status()
except requests.exceptions.HTTPError as err_msg:
warnings.warn('Failed to log out ' + repr(err_msg))
self.headers['Authorization'] = None
self.auth_token = None
if req.status_code == 200:
flag = True
return flag
##############################
#GET DATA
##############################
def marketOpenCheck(self):
canTrade = True
now = datetime.utcnow()
url = self.endpoints['markets']
marketData = (self.get_url(url)['results'])
for market in marketData:
marketTimeData = self.get_url(market['todays_hours'])
status = marketTimeData['is_open']
if status == False:
canTrade = False
print('is_open flag not true')
else:
print('is_open flag true')
openTime = marketTimeData['opens_at']
openTimeObject = datetime.strptime(openTime,'%Y-%m-%dT%H:%M:%SZ')
closeTime = marketTimeData['closes_at']
closeTimeObject= datetime.strptime(closeTime,'%Y-%m-%dT%H:%M:%SZ')
if not (status == False):
if now < openTimeObject:
canTrade = False
print('time before open')
if now > closeTimeObject:
canTrade = False
print('time after close')
return canTrade
def instruments(self, stock):
"""fetch instruments endpoint
Args:
stock (str): stock ticker
Returns:
(:obj:`dict`): JSON contents from `instruments` endpoint
"""
res = self.session.get(
self.endpoints['instruments'],
params={'query': stock.upper()}
)
res.raise_for_status()
res = res.json()
# if requesting all, return entire object so may paginate with ['next']
if (stock == ""):
return res
return res['results']
def get_url(self, url):
"""flat wrapper for fetching URL directly"""
return self.session.get(url).json()
def get_historical_quote(
self,
stock,
interval,
span
):
"""fetch historical data for stock
Note: valid interval/span configs
interval = 5minute | 10minute + span = day, week
interval = day + span = year
interval = week
TODO: NEEDS TESTS
Args:
stock (str): stock ticker
interval (str): resolution of data
span (str): length of data
Returns:
(:obj:`ndarray`) values returned from `historicals` endpoint
columns: open_price, low_price, high_price, close_price, mean_price, volume
"""
params = {
'symbols': stock,
'interval': interval,
'span': span,
'bounds': 'regular'
}
res = self.session.get(
self.endpoints['historicals'],
params=params
)
rawHistoricals = ((res.json()['results'])[0])['historicals']
numpyHistoricals = np.zeros((len(rawHistoricals),6))
ii = 0
for bar in rawHistoricals:
numpyHistoricals[ii,0] = float(bar['open_price'])
numpyHistoricals[ii,1] = float(bar['low_price'])
numpyHistoricals[ii,2] = float(bar['high_price'])
numpyHistoricals[ii,3] = float(bar['close_price'])
numpyHistoricals[ii,4] = (float(bar['open_price'])+float(bar['low_price'])+float(bar['high_price'])+float(bar['close_price']))/4
numpyHistoricals[ii,5] = float(bar['volume'])
ii = ii+1
return numpyHistoricals
def quote_data(self, stock=''):
"""fetch stock quote
Args:
stock (str): stock ticker, prompt if blank
Returns:
(:obj:`dict`): JSON contents from `quotes` endpoint
"""
url = None
if stock.find(',') == -1:
url = str(self.endpoints['quotes']) + str(stock) + "/"
else:
url = str(self.endpoints['quotes']) + "?symbols=" + str(stock)
#Check for validity of symbol
try:
req = requests.get(url)
req.raise_for_status()
data = req.json()
except requests.exceptions.HTTPError:
raise NameError('Invalid Symbol: ' + stock) #TODO: custom exception
return data
def ask_price(self, stock=''):
"""get asking price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): ask price
"""
data = self.quote_data(stock)
return float(data['ask_price'])
def bid_price(self, stock=''):
"""get bid price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): bid price
"""
data = self.quote_data(stock)
return float(data['bid_price'])
def get_account(self):
"""fetch account information
Returns:
(:obj:`dict`): `accounts` endpoint payload
"""
res = self.session.get(self.endpoints['accounts'])
res.raise_for_status() #auth required
res = res.json()
return res['results'][0]
##############################
# PORTFOLIOS DATA
##############################
def portfolios(self):
"""Returns the user's portfolio data."""
req = self.session.get(self.endpoints['portfolios'])
req.raise_for_status()
return req.json()['results'][0]
def adjusted_equity_previous_close(self):
"""wrapper for portfolios
get `adjusted_equity_previous_close` value
"""
return float(self.portfolios()['adjusted_equity_previous_close'])
def equity(self):
"""wrapper for portfolios
get `equity` value
"""
return float(self.portfolios()['equity'])
##############################
# POSITIONS DATA
##############################
def securities_owned(self):
"""
Returns a list of symbols of securities of which there are more
than zero shares in user's portfolio.
"""
return self.session.get(self.endpoints['positions']+'?nonzero=true').json()
##############################
#PLACE ORDER
##############################
def check_order_status(self,url):
orderOutcomeDictionary = {
'queued':'unresolved',
'unconfirmed':'unresolved',
'confirmed':'unresolved',
'partially_filled':'unresolved',
'filled':'success',
'rejected':'failure',
'canceled':'failure',
'failed':'failure'
}
orderResponse = self.get_url(url)
return orderOutcomeDictionary[orderResponse['state']]
def place_immediate_market_order(self,instrument,symbol,time_in_force,quantity,side,price=0.0):
payload = {
'account': self.get_account()['url'],
'instrument': instrument,
'quantity': quantity,
'side': side,
'symbol': symbol,
'time_in_force': time_in_force,
'trigger': 'immediate',
'type': 'market'
}
if side == 'buy':
payload['price']=price
res = self.session.post(
self.endpoints['orders'],
data=payload
)
return res.json()
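# --------------------------------------------------------------------------
# Example usage (editor's sketch, not part of the original class). The
# ticker below is a placeholder; credentials are read interactively.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    rh = Robinhood()
    if rh.login_prompt():
        print("AAPL bid: %s / ask: %s" % (rh.bid_price("AAPL"),
                                          rh.ask_price("AAPL")))
        print("Account equity: %s" % rh.equity())
        print("Last year of daily bars:")
        print(rh.get_historical_quote("AAPL", "day", "year"))
        rh.logout()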
| mit | -2,829,565,844,686,962,700 | 31.606383 | 140 | 0.530343 | false |
EvoluxBR/txthoonk | txthoonk/client.py | 1 | 14839 | from zope.interface import implements #@UnresolvedImport
from twisted.internet.protocol import ReconnectingClientFactory
from txredis.protocol import Redis, RedisSubscriber, defer
from twisted.internet import interfaces
import uuid
import itertools
from txthoonk.types import Feed
try:
    from collections import OrderedDict
except ImportError:
OrderedDict = dict
class FeedExists(Exception):
pass
class FeedDoesNotExist(Exception):
pass
class ThoonkBase(object):
"""
Thoonk object base class.
"""
SEPARATOR = "\x00"
implements(interfaces.IProtocol)
def __init__(self, redis):
'''
Constructor
@param redis: the txredis instance
'''
self.set_redis(redis)
self._uuid = uuid.uuid4().hex
def set_redis(self, redis):
'''
Set the txredis instance
@param redis: the txredis instance
'''
self.redis = redis
def dataReceived(self, data):
"""
Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
self.redis.dataReceived(data)
def connectionLost(self, reason):
"""
Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed. The C{reason}
Failure wraps a L{twisted.internet.error.ConnectionDone} or
L{twisted.internet.error.ConnectionLost} instance (or a subclass
of one of those).
@type reason: L{twisted.python.failure.Failure}
"""
self.redis.connectionLost(reason)
def makeConnection(self, transport):
"""
Make a connection to a transport and a server.
"""
self.redis.makeConnection(transport)
def connectionMade(self):
"""
Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
self.redis.connectionMade()
class ThoonkPub(ThoonkBase):
'''
Thoonk publisher class
'''
redis = Redis() # pydev: force code completion
def __init__(self, *args, **kwargs):
self.feed = self._get_feed_type(Feed, type_="feed")
super(ThoonkPub, self).__init__(*args, **kwargs)
def _get_feed_type(self, kls, type_):
'''
Returns a function in order to generate a specific feed type
@param kls: the python class of feed
@param type_: the type of feed to be stored in.
'''
config = {'type': type_}
def _create_type(feed_name):
'''
Creates a new feed of this type.
@param feed_name: the name of the feed.
'''
def _get_feed(*args):
"""Create a new a new instance of passed class"""
return kls(pub=self, name=feed_name)
def _exists(ret):
"""
Called when self.feed_exists returns
"""
if ret:
return _get_feed()
d = self.create_feed(feed_name, config)
d.addCallback(_get_feed)
return d
return self.feed_exists(feed_name).addCallback(_exists)
return _create_type
def _publish_channel(self, channel, *args):
"""Calls self.publish_channel appending self._uuid at end"""
args = list(args) + [self._uuid]
return self.publish_channel(channel, *args)
def publish_channel(self, channel, *args):
'''
Publish on channel.
@param channel: the channel where message will be published
@param *args: a list that will compose the message
'''
message = self.SEPARATOR.join(args)
return self.redis.publish(channel, message)
def create_feed(self, feed_name, config={}):
"""
Create a new feed with a given configuration.
The configuration is a dict, and should include a 'type'
entry with the class of the feed type implementation.
@param feed_name: The name of the new feed.
@param config: A dictionary of configuration values.
"""
def _set_config(ret):
'''
Called when self._publish_channel returns.
'''
return self.set_config(feed_name, config)
def _publish(ret):
"""
Called when redis.sadd returns.
"""
if ret == 1:
d = self._publish_channel("newfeed", feed_name)
d.addCallback(_set_config)
return d
else:
return defer.fail(FeedExists())
return self.redis.sadd("feeds", feed_name).addCallback(_publish)
def delete_feed(self, feed_name):
"""
Delete a given feed.
@param feed_name: The name of the feed.
"""
hash_feed_config = "feed.config:%s" % feed_name
def _exec_check(bulk_result):
# All defers must be succeed
assert all([a[0] for a in bulk_result])
# assert number of commands
assert len(bulk_result) == 7
multi_result = bulk_result[-1][1]
if multi_result:
# transaction done :D
# assert number commands in transaction
assert len(multi_result) == 3
# check if feed_name existed when was deleted
exists = multi_result[0]
if not exists:
return defer.fail(FeedDoesNotExist())
return True
# transaction fail :-(
# repeat it
return self.delete_feed(feed_name)
defers = []
# issue all commands in order to avoid concurrent calls
defers.append(self.redis.watch("feeds")) #0
defers.append(self.redis.watch(hash_feed_config)) #1
# begin transaction
defers.append(self.redis.multi()) #2
defers.append(self.redis.srem("feeds", feed_name)) #3 - #0
defers.append(self.redis.delete(hash_feed_config)) #4 - #1
defers.append(self._publish_channel("delfeed", feed_name)) #5 - #2
# end transaction
defers.append(self.redis.execute()) #6
return defer.DeferredList(defers).addCallback(_exec_check)
def feed_exists(self, feed_name):
"""
Check if a given feed exists.
@param feed_name: The name of the feed.
"""
return self.redis.sismember("feeds", feed_name)
def set_config(self, feed_name, config):
"""
Set the configuration for a given feed.
@param feed_name: The name of the feed.
@param config: A dictionary of configuration values.
"""
def _exists(ret):
if not ret:
return defer.fail(FeedDoesNotExist())
dl = []
for k, v in config.items():
dl.append(self.redis.hset('feed.config:%s' % feed_name, k, v))
return defer.DeferredList(dl)
return self.feed_exists(feed_name).addCallback(_exists)
def get_config(self, feed_name):
"""
Get the configuration for a given feed.
@param feed_name: The name of the feed.
@return: A defer witch callback function will have a config dict
as the first argument
"""
def _exists(ret):
if not ret:
return defer.fail(FeedDoesNotExist())
return self.redis.hgetall('feed.config:%s' % feed_name)
return self.feed_exists(feed_name).addCallback(_exists)
def get_feed_names(self):
"""
Return the set of known feeds.
@return: a defer witch callback function will have the set result
as first argument
"""
return self.redis.smembers("feeds")
class ThoonkPubFactory(ReconnectingClientFactory):
'''
ThoonkPub Factory
'''
protocol = Redis
protocol_wrapper = ThoonkPub
def __init__(self, *args, **kwargs):
'''
Constructor
'''
self._args = args
self._kwargs = kwargs
def buildProtocol(self, addr):
"""
Called when a connection has been established to addr.
If None is returned, the connection is assumed to have been refused,
and the Port will close the connection.
@type addr: (host, port)
@param addr: The address of the newly-established connection
@return: None if the connection was refused, otherwise an object
providing L{IProtocol}.
"""
redis = self.protocol(*self._args, **self._kwargs)
self.resetDelay()
return self.protocol_wrapper(redis)
class ThoonkSub(ThoonkBase):
'''
Thoonk Subscriber class.
'''
redis = RedisSubscriber() # pydev: force code completion
def __init__(self, redis):
'''
Constructor
@param redis: the txredis instance
'''
self._handlers = {'id_gen': itertools.count(), #@UndefinedVariable
'channel_handlers': {},
'id2channel' : {}}
# delay subscribe
self._subscribed = {'running': False,
'subscribed': {},
'running_for': None,
'defer': None}
super(ThoonkSub, self).__init__(redis)
def _get_sub_channel_cb(self, channel):
'''
Returns a callback in order to subscribe one channel.
@param channel: the desired channel.
'''
return lambda arg: self._sub_channel(channel)
def _evt2channel(self, evt):
'''
Convert Thoonk.py channels in compatible events
@param evt: the event
'''
# Thoonk.py compatible events
channel = evt
if evt == "create":
channel = "newfeed"
elif evt == "delete":
channel = "delfeed"
return channel
def _sub_channel(self, channel):
"""
Subscribe to a channel using a defer.
This call will queue channel subscriptions.
@param channel: the desired channel.
"""
if self._subscribed['subscribed'].get(channel):
# already subcribed
return defer.succeed(True)
if self._subscribed['running']:
# call it later, queue it
d = self._subscribed['defer']
d.addCallback(self._get_sub_channel_cb(channel))
return d
def set_subscribed(*args):
'''
Called when channel was subscribed.
'''
self._subscribed['running'] = False
self._subscribed['subscribed'][channel] = True
return True
self._subscribed['running'] = True
self.redis.subscribe(channel)
d = defer.Deferred()
self._subscribed['defer'] = d
self._subscribed['running_for'] = channel
return d.addCallback(set_subscribed)
def set_redis(self, redis):
'''
Set the txredis instance
@param redis: the txredis instance
'''
# FIXME: on (re)connect (re)subscribe all channels
redis.messageReceived = self.messageReceived
redis.channelSubscribed = self.channelSubscribed
super(ThoonkSub, self).set_redis(redis)
def register_handler(self, evt, handler):
"""
Register a function to respond to feed events.
Event types/handler params:
- create handler(feedname)
- newfeed handler(feedname)
- delete handler(feedname)
- delfeed handler(feedname)
- feed.publish:[feed] handler(id, item)
- feed.retract:[feed] handler(id)
- feed.edit:[feed] handler(id, item)
@param evt: The name of the feed event.
@param handler: The function for handling the event.
"""
channel = self._evt2channel(evt)
if not channel:
return defer.succeed(None)
def _register_callback(*args):
"""
Called when channel was subscribed.
"""
id_ = self._handlers['id_gen'].next()
# store map id -> channel
self._handlers['id2channel'][id_] = channel
handlers = self._handlers['channel_handlers'].get(channel)
if not handlers:
handlers = self._handlers['channel_handlers'][channel] = OrderedDict()
# store handler
handlers[id_] = handler
return id_
return self._sub_channel(channel).addCallback(_register_callback)
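    # Illustrative sketch (not part of the original module): registering a
    # publish handler on an assumed ThoonkSub instance ``sub``; the callback
    # signature follows the table in register_handler's docstring.
    #
    #     def on_publish(item_id, item):
    #         print "got item %s: %s" % (item_id, item)
    #
    #     d = sub.register_handler("feed.publish:myfeed", on_publish)
    #     d.addCallback(lambda handler_id: sub.remove_handler(handler_id))
    #
    # ``sub`` and "myfeed" are placeholder names used only for this example.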
def remove_handler(self, id_):
"""
Unregister a function that was registered via register_handler
@param id_: the handler id
"""
channel = self._handlers['id2channel'].get(id_)
if not channel:
return
del self._handlers['channel_handlers'][channel][id_]
del self._handlers['id2channel'][id_]
def messageReceived(self, channel, message):
"""
Called when this connection is subscribed to a channel that
has received a message published on it.
"""
handlers = self._handlers['channel_handlers'].get(channel)
if handlers is None:
return
for handler in handlers.values():
args = message.split(self.SEPARATOR)
handler(*args)
def channelSubscribed(self, channel, numSubscriptions):
"""
Called when a channel is subscribed to.
"""
assert self._subscribed['running']
assert self._subscribed['running_for'] == channel
d = self._subscribed['defer']
d.callback(True)
class ThoonkSubFactory(ThoonkPubFactory):
'''
ThoonkSub Factory class.
'''
protocol = RedisSubscriber
protocol_wrapper = ThoonkSub
| mit | -747,066,772,516,353,300 | 29.659091 | 86 | 0.571602 | false |
kaday/rose | lib/python/rose/config_editor/valuewidget/text.py | 1 | 5716 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import pygtk
pygtk.require('2.0')
import gtk
import rose.config_editor
import rose.config_editor.valuewidget
import rose.env
import rose.gtk.util
ENV_COLOUR = rose.gtk.util.color_parse(
rose.config_editor.COLOUR_VARIABLE_TEXT_VAL_ENV)
class RawValueWidget(gtk.HBox):
"""This class generates a basic entry widget for an unformatted value."""
def __init__(self, value, metadata, set_value, hook, arg_str=None):
super(RawValueWidget, self).__init__(homogeneous=False, spacing=0)
self.value = value
self.metadata = metadata
self.set_value = set_value
self.hook = hook
self.entry = gtk.Entry()
insensitive_colour = gtk.Style().bg[0]
self.entry.modify_bg(gtk.STATE_INSENSITIVE, insensitive_colour)
self.normal_colour = gtk.Style().fg[gtk.STATE_NORMAL]
if rose.env.contains_env_var(self.value):
self.entry.modify_text(gtk.STATE_NORMAL, ENV_COLOUR)
self.entry.set_tooltip_text(rose.config_editor.VAR_WIDGET_ENV_INFO)
self.entry.set_text(self.value)
self.entry.connect("button-release-event",
self._handle_middle_click_paste)
self.entry.connect_after("paste-clipboard", self.setter)
self.entry.connect_after("key-release-event",
lambda e, v: self.setter(e))
self.entry.connect_after("button-release-event",
lambda e, v: self.setter(e))
self.entry.show()
self.pack_start(self.entry, expand=True, fill=True, padding=0)
self.entry.connect('focus-in-event',
self.hook.trigger_scroll)
self.grab_focus = lambda: self.hook.get_focus(self.entry)
def setter(self, widget, *args):
new_value = widget.get_text()
if new_value == self.value:
return False
self.value = new_value
self.set_value(self.value)
if rose.env.contains_env_var(self.value):
self.entry.modify_text(gtk.STATE_NORMAL, ENV_COLOUR)
self.entry.set_tooltip_text(rose.config_editor.VAR_WIDGET_ENV_INFO)
else:
self.entry.set_tooltip_text(None)
return False
def get_focus_index(self):
"""Return the cursor position within the variable value."""
return self.entry.get_position()
def set_focus_index(self, focus_index=None):
if focus_index is None:
return False
self.entry.set_position(focus_index)
def _handle_middle_click_paste(self, widget, event):
if event.button == 2:
self.setter(widget)
return False
class TextMultilineValueWidget(gtk.HBox):
"""This class displays text with multiple lines."""
def __init__(self, value, metadata, set_value, hook, arg_str=None):
super(TextMultilineValueWidget, self).__init__(homogeneous=False,
spacing=0)
self.value = value
self.metadata = metadata
self.set_value = set_value
self.hook = hook
self.entry_scroller = gtk.ScrolledWindow()
self.entry_scroller.set_shadow_type(gtk.SHADOW_IN)
self.entry_scroller.set_policy(gtk.POLICY_AUTOMATIC,
gtk.POLICY_NEVER)
self.entry_scroller.show()
self.entrybuffer = gtk.TextBuffer()
self.entrybuffer.set_text(self.value)
self.entry = gtk.TextView(self.entrybuffer)
self.entry.set_wrap_mode(gtk.WRAP_WORD)
self.entry.set_left_margin(rose.config_editor.SPACING_SUB_PAGE)
self.entry.set_right_margin(rose.config_editor.SPACING_SUB_PAGE)
self.entry.connect('focus-in-event', self.hook.trigger_scroll)
self.entry.show()
self.entry_scroller.add(self.entry)
self.grab_focus = lambda: self.hook.get_focus(self.entry)
self.entrybuffer.connect('changed', self.setter)
self.pack_start(self.entry_scroller, expand=True, fill=True)
def get_focus_index(self):
"""Return the cursor position within the variable value."""
mark = self.entrybuffer.get_insert()
iter_ = self.entrybuffer.get_iter_at_mark(mark)
return iter_.get_offset()
def set_focus_index(self, focus_index=None):
"""Set the cursor position within the variable value."""
if focus_index is None:
return False
iter_ = self.entrybuffer.get_iter_at_offset(focus_index)
self.entrybuffer.place_cursor(iter_)
def setter(self, widget):
text = widget.get_text(widget.get_start_iter(),
widget.get_end_iter())
if text != self.value:
self.value = text
self.set_value(self.value)
return False
| gpl-3.0 | -9,203,306,685,464,285,000 | 39.253521 | 79 | 0.615815 | false |
bmentges/django-cart | cart/models.py | 1 | 2340 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
class Cart(models.Model):
creation_date = models.DateTimeField(verbose_name=_('creation date'))
checked_out = models.BooleanField(default=False, verbose_name=_('checked out'))
class Meta:
verbose_name = _('cart')
verbose_name_plural = _('carts')
ordering = ('-creation_date',)
def __unicode__(self):
return unicode(self.creation_date)
class ItemManager(models.Manager):
def get(self, *args, **kwargs):
if 'product' in kwargs:
kwargs['content_type'] = ContentType.objects.get_for_model(type(kwargs['product']))
kwargs['object_id'] = kwargs['product'].pk
del(kwargs['product'])
return super(ItemManager, self).get(*args, **kwargs)
def filter(self, *args, **kwargs):
if 'product' in kwargs:
kwargs['content_type'] = ContentType.objects.get_for_model(type(kwargs['product']))
kwargs['object_id'] = kwargs['product'].pk
del(kwargs['product'])
return super(ItemManager, self).filter(*args, **kwargs)
class Item(models.Model):
cart = models.ForeignKey(Cart, verbose_name=_('cart'), on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(verbose_name=_('quantity'))
unit_price = models.DecimalField(max_digits=18, decimal_places=2, verbose_name=_('unit price'))
# product as generic relation
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
objects = ItemManager()
class Meta:
verbose_name = _('item')
verbose_name_plural = _('items')
ordering = ('cart',)
def __unicode__(self):
return u'%d units of %s' % (self.quantity, self.product.__class__.__name__)
def total_price(self):
return self.quantity * self.unit_price
total_price = property(total_price)
# product
def get_product(self):
return self.content_type.get_object_for_this_type(pk=self.object_id)
def set_product(self, product):
self.content_type = ContentType.objects.get_for_model(type(product))
self.object_id = product.pk
product = property(get_product, set_product)
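# Illustrative sketch (not part of the original module): how the generic
# relation on Item is typically used; ``cart`` and ``book`` are assumed
# objects (any model instance can play the role of ``book``).
#
#     item = Item(cart=cart, quantity=2, unit_price=book.price)
#     item.product = book        # stored via content_type / object_id
#     item.save()
#     Item.objects.filter(cart=cart, product=book)   # resolved by ItemManager
#     item.total_price           # 2 * book.price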
| lgpl-3.0 | -903,081,689,665,839,700 | 35 | 99 | 0.649145 | false |
brain-research/mirage-rl-qprop | rllab/envs/mujoco/ant_env.py | 2 | 2342 | from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.misc import logger
from rllab.envs.mujoco.mujoco_env import q_mult, q_inv
import numpy as np
import math
class AntEnv(MujocoEnv, Serializable):
FILE = 'ant.xml'
ORI_IND = 3
def __init__(self, *args, **kwargs):
super(AntEnv, self).__init__(*args, **kwargs)
Serializable.__init__(self, *args, **kwargs)
def get_current_obs(self):
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat,
np.clip(self.model.data.cfrc_ext, -1, 1).flat,
self.get_body_xmat("torso").flat,
self.get_body_com("torso"),
]).reshape(-1)
def step(self, action):
self.forward_dynamics(action)
comvel = self.get_body_comvel("torso")
forward_reward = comvel[0]
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        # penalty on large external contact forces (no trailing comma here, so
        # contact_cost is a scalar rather than a one-element tuple)
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
survive_reward = 0.05
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self._state
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self.get_current_obs()
return Step(ob, float(reward), done)
@overrides
def get_ori(self):
ori = [0, 1, 0, 0]
rot = self.model.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4] # take the quaternion
ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3] # project onto x-y plane
ori = math.atan2(ori[1], ori[0])
return ori
@overrides
def log_diagnostics(self, paths):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
logger.record_tabular('AverageForwardProgress', np.mean(progs))
logger.record_tabular('MaxForwardProgress', np.max(progs))
logger.record_tabular('MinForwardProgress', np.min(progs))
logger.record_tabular('StdForwardProgress', np.std(progs))
| mit | -4,974,392,672,893,091,000 | 34.484848 | 108 | 0.596926 | false |
weng-lab/SnoPlowPy | snoPlowPy/exp_file.py | 1 | 5223 | #!/usr/bin/env python
from __future__ import print_function
import os
import json
from .files_and_paths import Dirs, Urls
from .utils import Utils
from .exp_file_metadata import ExpFileMetadata
class ExpFile(ExpFileMetadata):
def __init__(self, expID=None, fileID=None):
ExpFileMetadata.__init__(self)
self.expID = expID
self.fileID = fileID
# from http://stackoverflow.com/a/682545
@classmethod
def fromJson(cls, expID, fileID, j):
ret = cls(expID, fileID)
ret._parseJson(expID, fileID, j)
return ret
@classmethod
# in case file JSON is not part of the experiment json, for some unknown reason (revoked?)
def fromJsonFile(cls, expID, fileID, force):
ret = cls(expID, fileID)
jsonFnp = os.path.join(Dirs.encode_json, "exps", expID, fileID + ".json")
jsonUrl = Urls.base + "/files/{fileID}/?format=json".format(fileID=fileID)
Utils.ensureDir(jsonFnp)
Utils.download(jsonUrl, jsonFnp, True, force, skipSizeCheck=True)
with open(jsonFnp) as f:
j = json.load(f)
ret._parseJson(expID, fileID, j)
return ret
@classmethod
def fromWebservice(cls, expID, r):
ret = cls(expID, r["file"])
ret.parseWS(r)
return ret
@classmethod
def fromRoadmap(cls, eid, assay_term_name):
ret = cls(eid, eid)
ret.assembly = "hg19"
ret.assay_term_name = assay_term_name
ret.isPooled = True
return ret
def __repr__(self):
return "\t".join([str(x) for x in [self.fileID, self.file_format,
self.output_type,
"bio" + str(self.bio_rep),
"tech" + str(self.tech_rep),
"biological_replicates" +
str(self.biological_replicates),
self.jsonUrl, self.isPooled]])
def isPeaks(self):
return "peaks" == self.output_type
def isReplicatedPeaks(self):
return "replicated peaks" == self.output_type
def isBedNarrowPeak(self):
return "bed narrowPeak" == self.file_type
def isBedBroadPeak(self):
return "bed broadPeak" == self.file_type
def isIDRoptimal(self):
return "optimal idr thresholded peaks" == self.output_type
def isBed(self):
return "bed" == self.file_format
def isBigBed(self):
return "bigBed" == self.file_format
def isBam(self):
return "bam" == self.file_type
def isGtf(self):
return "gtf" == self.file_format
def isHdf5(self):
return "hdf5" == self.file_format
def isBigWig(self):
return "bigWig" == self.file_type
def isSignal(self):
return "signal" == self.output_type
def isRawSignal(self):
return "raw signal" == self.output_type
def isHotSpot(self):
return "hotspots" == self.output_type
def isFoldChange(self):
return "fold change over control" == self.output_type
def isIDR(self):
return "optimal idr thresholded peaks" == self.output_type
def isFastqOrFasta(self):
return "fasta" == self.file_type or "fastq" == self.file_type
def isTAD(self):
return "topologically associated domains" == self.output_type
def isTSV(self):
return "tsv" == self.file_type
def getControls(self):
x = set()
if "derived_from" in self.jsondata:
for i in self.jsondata["derived_from"]:
if "controlled_by" in i:
x.add(i["controlled_by"][0])
return list(x)
def fnp(self, s4s=False):
if self.expID.startswith("EN"):
d = os.path.join(Dirs.encode_data, self.expID)
fn = os.path.basename(self.url)
fnp = os.path.join(d, fn)
if s4s:
fnp = fnp.replace("/project/umw_", "/s4s/s4s_")
return fnp
if "H3K27ac" == self.assay_term_name:
fn = self.expID + "-H3K27ac.fc.signal.bigwig"
elif "DNase-seq" == self.assay_term_name:
fn = self.expID + "-DNase.fc.signal.bigwig"
else:
raise Exception("unknown ROADMAP file type")
return os.path.join(Dirs.roadmap_base, self.expID, fn)
def normFnp(self):
fnp = self.fnp()
fnp = fnp.replace("encode/data/", "encode/norm/")
fnp = fnp.replace("roadmap/data/consolidated",
"roadmap/data/norm/consolidated")
pre, ext = os.path.splitext(fnp)
if ".bigwig" == ext:
ext = ".bigWig"
return pre + ".norm" + ext
def download(self, force=None):
fnp = self.fnp()
Utils.ensureDir(fnp)
return Utils.download(self.url, fnp,
True, force, self.file_size_bytes)
def downloadPublic(self, force=None):
fnp = self.fnp()
Utils.ensureDir(fnp)
return Utils.download(self.url, fnp,
False, force, self.file_size_bytes)
def featurename(self):
return self.fileID
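# Illustrative sketch (not part of the original module): building an ExpFile
# from a portal JSON record and downloading it. The accession numbers are
# placeholders, not real ENCODE records.
#
#     ef = ExpFile.fromJsonFile("ENCSR000AAA", "ENCFF000AAA", force=False)
#     if ef.isBigWig() and ef.isFoldChange():
#         ef.download()
#         print(ef.fnp())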
| mit | 879,180,562,057,076,400 | 30.463855 | 94 | 0.560789 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/oneconf/directconnect.py | 1 | 2946 | # Copyright (C) 2010 Canonical
#
# Authors:
# Didier Roche <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import gettext
from gettext import gettext as _
from oneconf.hosts import Hosts, HostError
import sys
class DirectConnect(object):
"""
Dummy backend handling exit and exception directly
"""
def _ensurePackageSetHandler(self):
'''Ensure we import the package set handler at the right time'''
from oneconf.packagesethandler import PackageSetHandler
self.PackageSetHandler = PackageSetHandler
def get_all_hosts(self):
'''get a dict of all available hosts'''
return Hosts().get_all_hosts()
def set_share_inventory(self, share_inventory, hostid=None, hostname=None):
'''update if we share the chosen host inventory on the server'''
try:
Hosts().set_share_inventory(share_inventory, hostid, hostname)
except HostError, e:
print(e)
sys.exit(1)
def get_packages(self, hostid, hostname, only_manual):
'''trigger getpackages handling'''
try:
self._ensurePackageSetHandler()
return self.PackageSetHandler().get_packages(hostid, hostname, only_manual)
except HostError, e:
print(e)
sys.exit(1)
def diff(self, hostid, hostname):
'''trigger diff handling'''
try:
self._ensurePackageSetHandler()
return self.PackageSetHandler().diff(hostid, hostname)
except HostError, e:
print(e)
sys.exit(1)
def update(self):
'''trigger update handling'''
try:
self._ensurePackageSetHandler()
self.PackageSetHandler().update()
except HostError, e:
print(e)
sys.exit(1)
def async_update(self):
'''only used in fallback mode: no async notion for direct connexion'''
self.update()
def get_last_sync_date(self):
'''get last time the store was successfully synced'''
return Hosts().get_last_sync_date()
def stop_service(self):
'''kindly ask the oneconf service to stop (not relevant for a direct mode)'''
print _("Nothing done: in direct mode, there is no communication with the service")
sys.exit(1)
| gpl-3.0 | 2,835,355,509,316,848,600 | 32.101124 | 91 | 0.647318 | false |
iansealy/rosalind | grph/grph-biopython.py | 1 | 1618 | #!/usr/bin/env python
"""This script is given "A collection of DNA strings in FASTA format having
total length at most 10 kbp" and returns "The adjacency list corresponding to
O3. You may return edges in any order".
"""
import argparse
from Bio import SeqIO
from Bio.Alphabet import generic_dna
def main(args):
"""Overlap Graphs"""
# Overlap constant
OVERLAP = 3
seq_by_prefix = {}
for record in SeqIO.parse(args.dataset, 'fasta', generic_dna):
try:
seq_by_prefix[str(record.seq[:OVERLAP])].append(record.id)
except KeyError:
seq_by_prefix[str(record.seq[:OVERLAP])] = [record.id]
graph = {}
args.dataset.seek(0)
for record in SeqIO.parse(args.dataset, 'fasta', generic_dna):
suffix = str(record.seq[-OVERLAP:])
try:
for overlap_id in seq_by_prefix[suffix]:
if record.id == overlap_id:
continue
try:
graph[record.id][overlap_id] = True
except KeyError:
graph[record.id] = {}
graph[record.id][overlap_id] = True
        except KeyError:
            # no sequence begins with this suffix
            pass
for id1 in graph:
for id2 in graph[id1]:
print('{:s} {:s}'.format(id1, id2))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Overlap Graphs')
parser.add_argument(
'dataset', metavar='FILE', type=argparse.FileType('r'),
help='A collection of DNA strings in FASTA format '
'having total length at most 10 kbp')
args = parser.parse_args()
main(args)
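# Illustrative sketch (not part of the original script): with the Rosalind
# GRPH sample dataset (Rosalind_0498 AAATAAA, Rosalind_2391 AAATTTT,
# Rosalind_2323 TTTTCCC, Rosalind_0442 AAATCCC, Rosalind_5013 GGGTGGG)
# the expected output, in any order, is:
#
#     Rosalind_0498 Rosalind_2391
#     Rosalind_0498 Rosalind_0442
#     Rosalind_2391 Rosalind_2323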
| gpl-3.0 | -3,033,114,035,736,476,000 | 28.962963 | 77 | 0.580964 | false |
mgorny/PyGithub | github/Organization.py | 1 | 47843 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2015 Sebastien Besson <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Matthew Neal <[email protected]> #
# Copyright 2016 Michael Pereira <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2017 Balázs Rostás <[email protected]> #
# Copyright 2018 Anton Nguyen <[email protected]> #
# Copyright 2018 Jacopo Notarstefano <[email protected]> #
# Copyright 2018 Jasper van Wanrooy <[email protected]> #
# Copyright 2018 Raihaan <[email protected]> #
# Copyright 2018 Tim Boring <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# Copyright 2018 Steve Kowalik <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import github.Event
import github.GithubObject
import github.NamedUser
import github.PaginatedList
import github.Plan
import github.Project
import github.Repository
import github.Team
from . import Consts
class Organization(github.GithubObject.CompletableGithubObject):
"""
This class represents Organizations. The reference can be found here http://developer.github.com/v3/orgs/
"""
def __repr__(self):
return self.get__repr__({"login": self._login.value})
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def billing_email(self):
"""
:type: string
"""
self._completeIfNotSet(self._billing_email)
return self._billing_email.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._public_members_url)
return self._public_members_url.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def two_factor_requirement_enabled(self):
"""
:type: bool
"""
self._completeIfNotSet(self._two_factor_requirement_enabled)
return self._two_factor_requirement_enabled.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_members(self, member, role=github.GithubObject.NotSet):
"""
:calls: `PUT /orgs/:org/memberships/:user <https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership>`_
:param member: :class:`github.NamedUser.NamedUser`
:param role: string
:rtype: None
"""
assert role is github.GithubObject.NotSet or isinstance(role, str), role
assert isinstance(member, github.NamedUser.NamedUser), member
put_parameters = {}
if role is not github.GithubObject.NotSet:
put_parameters["role"] = role
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/memberships/" + member._identity, input=put_parameters
)
def add_to_public_members(self, public_member):
"""
:calls: `PUT /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/public_members/" + public_member._identity
)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
url_parameters = {
"org": self.login,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
parameters=url_parameters,
)
return github.Repository.Repository(
self._requester, headers, data, completed=True
)
def create_hook(
self,
name,
config,
events=github.GithubObject.NotSet,
active=github.GithubObject.NotSet,
):
"""
:calls: `POST /orgs/:owner/hooks <http://developer.github.com/v3/orgs/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param active: bool
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(name, str), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(
isinstance(element, str) for element in events
), events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"POST", self.url + "/hooks", input=post_parameters
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def create_repo(
self,
name,
description=github.GithubObject.NotSet,
homepage=github.GithubObject.NotSet,
private=github.GithubObject.NotSet,
has_issues=github.GithubObject.NotSet,
has_wiki=github.GithubObject.NotSet,
has_downloads=github.GithubObject.NotSet,
has_projects=github.GithubObject.NotSet,
team_id=github.GithubObject.NotSet,
auto_init=github.GithubObject.NotSet,
license_template=github.GithubObject.NotSet,
gitignore_template=github.GithubObject.NotSet,
allow_squash_merge=github.GithubObject.NotSet,
allow_merge_commit=github.GithubObject.NotSet,
allow_rebase_merge=github.GithubObject.NotSet,
delete_branch_on_merge=github.GithubObject.NotSet,
):
"""
:calls: `POST /orgs/:org/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param team_id: : int
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:param delete_branch_on_merge: bool
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
assert description is github.GithubObject.NotSet or isinstance(
description, str
), description
assert homepage is github.GithubObject.NotSet or isinstance(
homepage, str
), homepage
assert private is github.GithubObject.NotSet or isinstance(
private, bool
), private
assert has_issues is github.GithubObject.NotSet or isinstance(
has_issues, bool
), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(
has_wiki, bool
), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(
has_downloads, bool
), has_downloads
assert has_projects is github.GithubObject.NotSet or isinstance(
has_projects, bool
), has_projects
assert team_id is github.GithubObject.NotSet or isinstance(
team_id, int
), team_id
assert auto_init is github.GithubObject.NotSet or isinstance(
auto_init, bool
), auto_init
assert license_template is github.GithubObject.NotSet or isinstance(
license_template, str
), license_template
assert gitignore_template is github.GithubObject.NotSet or isinstance(
gitignore_template, str
), gitignore_template
assert allow_squash_merge is github.GithubObject.NotSet or isinstance(
allow_squash_merge, bool
), allow_squash_merge
assert allow_merge_commit is github.GithubObject.NotSet or isinstance(
allow_merge_commit, bool
), allow_merge_commit
assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(
allow_rebase_merge, bool
), allow_rebase_merge
assert delete_branch_on_merge is github.GithubObject.NotSet or isinstance(
delete_branch_on_merge, bool
), delete_branch_on_merge
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if has_projects is not github.GithubObject.NotSet:
post_parameters["has_projects"] = has_projects
if team_id is not github.GithubObject.NotSet:
post_parameters["team_id"] = team_id
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if license_template is not github.GithubObject.NotSet:
post_parameters["license_template"] = license_template
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
if allow_squash_merge is not github.GithubObject.NotSet:
post_parameters["allow_squash_merge"] = allow_squash_merge
if allow_merge_commit is not github.GithubObject.NotSet:
post_parameters["allow_merge_commit"] = allow_merge_commit
if allow_rebase_merge is not github.GithubObject.NotSet:
post_parameters["allow_rebase_merge"] = allow_rebase_merge
if delete_branch_on_merge is not github.GithubObject.NotSet:
post_parameters["delete_branch_on_merge"] = delete_branch_on_merge
headers, data = self._requester.requestJsonAndCheck(
"POST", self.url + "/repos", input=post_parameters
)
return github.Repository.Repository(
self._requester, headers, data, completed=True
)
def create_team(
self,
name,
repo_names=github.GithubObject.NotSet,
permission=github.GithubObject.NotSet,
privacy=github.GithubObject.NotSet,
description=github.GithubObject.NotSet,
):
"""
:calls: `POST /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_
:param name: string
:param repo_names: list of :class:`github.Repository.Repository`
:param permission: string
:param privacy: string
:param description: string
:rtype: :class:`github.Team.Team`
"""
assert isinstance(name, str), name
assert repo_names is github.GithubObject.NotSet or all(
isinstance(element, github.Repository.Repository) for element in repo_names
), repo_names
assert permission is github.GithubObject.NotSet or isinstance(
permission, str
), permission
assert privacy is github.GithubObject.NotSet or isinstance(
privacy, str
), privacy
assert description is github.GithubObject.NotSet or isinstance(
description, str
), description
post_parameters = {
"name": name,
}
if repo_names is not github.GithubObject.NotSet:
post_parameters["repo_names"] = [
element._identity for element in repo_names
]
if permission is not github.GithubObject.NotSet:
post_parameters["permission"] = permission
if privacy is not github.GithubObject.NotSet:
post_parameters["privacy"] = privacy
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestJsonAndCheck(
"POST", self.url + "/teams", input=post_parameters
)
return github.Team.Team(self._requester, headers, data, completed=True)
def delete_hook(self, id):
"""
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
        :rtype: None
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/hooks/" + str(id)
)
def edit(
self,
billing_email=github.GithubObject.NotSet,
blog=github.GithubObject.NotSet,
company=github.GithubObject.NotSet,
description=github.GithubObject.NotSet,
email=github.GithubObject.NotSet,
location=github.GithubObject.NotSet,
name=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param description: string
:param email: string
:param location: string
:param name: string
:rtype: None
"""
assert billing_email is github.GithubObject.NotSet or isinstance(
billing_email, str
), billing_email
assert blog is github.GithubObject.NotSet or isinstance(blog, str), blog
assert company is github.GithubObject.NotSet or isinstance(
company, str
), company
assert description is github.GithubObject.NotSet or isinstance(
description, str
), description
assert email is github.GithubObject.NotSet or isinstance(email, str), email
assert location is github.GithubObject.NotSet or isinstance(
location, str
), location
assert name is github.GithubObject.NotSet or isinstance(name, str), name
post_parameters = dict()
if billing_email is not github.GithubObject.NotSet:
post_parameters["billing_email"] = billing_email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url, input=post_parameters
)
self._useAttributes(data)
def edit_hook(
self,
id,
name,
config,
events=github.GithubObject.NotSet,
active=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:param name: string
:param config: dict
:param events: list of string
:param active: bool
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(id, int), id
assert isinstance(name, str), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(
isinstance(element, str) for element in events
), events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url + "/hooks/" + str(id), input=post_parameters
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def get_events(self):
"""
:calls: `GET /orgs/:org/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event, self._requester, self.url + "/events", None
)
def get_hook(self, id):
"""
:calls: `GET /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck(
"GET", self.url + "/hooks/" + str(id)
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def get_hooks(self):
"""
:calls: `GET /orgs/:owner/hooks <http://developer.github.com/v3/orgs/hooks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Hook.Hook`
"""
return github.PaginatedList.PaginatedList(
github.Hook.Hook, self._requester, self.url + "/hooks", None
)
def get_issues(
self,
filter=github.GithubObject.NotSet,
state=github.GithubObject.NotSet,
labels=github.GithubObject.NotSet,
sort=github.GithubObject.NotSet,
direction=github.GithubObject.NotSet,
since=github.GithubObject.NotSet,
):
"""
:calls: `GET /orgs/:org/issues <http://developer.github.com/v3/issues>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, str), filter
assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert labels is github.GithubObject.NotSet or all(
isinstance(element, github.Label.Label) for element in labels
), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(
direction, str
), direction
assert since is github.GithubObject.NotSet or isinstance(
since, datetime.datetime
), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue, self._requester, self.url + "/issues", url_parameters
)
def get_members(
self, filter_=github.GithubObject.NotSet, role=github.GithubObject.NotSet
):
"""
:calls: `GET /orgs/:org/members <http://developer.github.com/v3/orgs/members>`_
:param filter_: string
:param role: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
assert filter_ is github.GithubObject.NotSet or isinstance(
filter_, str
), filter_
assert role is github.GithubObject.NotSet or isinstance(role, str), role
url_parameters = {}
if filter_ is not github.GithubObject.NotSet:
url_parameters["filter"] = filter_
if role is not github.GithubObject.NotSet:
url_parameters["role"] = role
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/members",
url_parameters,
)
def get_projects(self, state=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/projects <https://developer.github.com/v3/projects/#list-organization-projects>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Project.Project`
:param state: string
"""
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
return github.PaginatedList.PaginatedList(
github.Project.Project,
self._requester,
self.url + "/projects",
url_parameters,
{"Accept": Consts.mediaTypeProjectsPreview},
)
def get_public_members(self):
"""
:calls: `GET /orgs/:org/public_members <http://developer.github.com/v3/orgs/members>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/public_members",
None,
)
def get_outside_collaborators(self, filter_=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/outside_collaborators <http://developer.github.com/v3/orgs/outside_collaborators>`_
:param filter_: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
assert filter_ is github.GithubObject.NotSet or isinstance(
filter_, str
), filter_
url_parameters = {}
if filter_ is not github.GithubObject.NotSet:
url_parameters["filter"] = filter_
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/outside_collaborators",
url_parameters,
)
def remove_outside_collaborator(self, collaborator):
"""
:calls: `DELETE /orgs/:org/outside_collaborators/:username <https://developer.github.com/v3/orgs/outside_collaborators>`_
:param collaborator: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser), collaborator
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/outside_collaborators/" + collaborator._identity
)
def convert_to_outside_collaborator(self, member):
"""
:calls: `PUT /orgs/:org/outside_collaborators/:username <https://developer.github.com/v3/orgs/outside_collaborators>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/outside_collaborators/" + member._identity
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
headers, data = self._requester.requestJsonAndCheck(
"GET", "/repos/" + self.login + "/" + name
)
return github.Repository.Repository(
self._requester, headers, data, completed=True
)
def get_repos(
self,
type=github.GithubObject.NotSet,
sort=github.GithubObject.NotSet,
direction=github.GithubObject.NotSet,
):
"""
:calls: `GET /orgs/:org/repos <http://developer.github.com/v3/repos>`_
:param type: string ('all', 'public', 'private', 'forks', 'sources', 'member')
:param sort: string ('created', 'updated', 'pushed', 'full_name')
        :param direction: string ('asc', 'desc')
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, str), type
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(
direction, str
), direction
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/repos",
url_parameters,
)
def get_team(self, id):
"""
:calls: `GET /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param id: integer
:rtype: :class:`github.Team.Team`
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck("GET", "/teams/" + str(id))
return github.Team.Team(self._requester, headers, data, completed=True)
def get_team_by_slug(self, slug):
"""
:calls: `GET /orgs/:org/teams/:team_slug <https://developer.github.com/v3/teams>`_
:param slug: string
:rtype: :class:`github.Team.Team`
"""
assert isinstance(slug, str), slug
headers, data = self._requester.requestJsonAndCheck(
"GET", self.url + "/teams/" + slug
)
return github.Team.Team(self._requester, headers, data, completed=True)
def get_teams(self):
"""
:calls: `GET /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team, self._requester, self.url + "/teams", None
)
def invitations(self):
"""
:calls: `GET /orgs/:org/invitations <https://developer.github.com/v3/orgs/members>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/invitations",
None,
headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
)
def invite_user(
self,
user=github.GithubObject.NotSet,
email=github.GithubObject.NotSet,
role=github.GithubObject.NotSet,
teams=github.GithubObject.NotSet,
):
"""
:calls: `POST /orgs/:org/invitations <http://developer.github.com/v3/orgs/members>`_
:param user: :class:`github.NamedUser.NamedUser`
:param email: string
:param role: string
:param teams: array of :class:`github.Team.Team`
:rtype: None
"""
assert user is github.GithubObject.NotSet or isinstance(
user, github.NamedUser.NamedUser
), user
assert email is github.GithubObject.NotSet or isinstance(email, str), email
assert (email is github.GithubObject.NotSet) ^ (
user is github.GithubObject.NotSet
), "specify only one of email or user"
parameters = {}
if user is not github.GithubObject.NotSet:
parameters["invitee_id"] = user.id
elif email is not github.GithubObject.NotSet:
parameters["email"] = email
if role is not github.GithubObject.NotSet:
assert isinstance(role, str), role
assert role in ["admin", "direct_member", "billing_manager"]
parameters["role"] = role
if teams is not github.GithubObject.NotSet:
assert all(isinstance(team, github.Team.Team) for team in teams)
parameters["team_ids"] = [t.id for t in teams]
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/invitations",
headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
input=parameters,
)
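    # Illustrative sketch (not part of the original class): inviting a user by
    # e-mail with a role and an initial team; the address and team slug are
    # placeholders.
    #
    #     team = org.get_team_by_slug("backend")
    #     org.invite_user(email="[email protected]",
    #                     role="direct_member", teams=[team])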
def has_in_members(self, member):
"""
:calls: `GET /orgs/:org/members/:user <http://developer.github.com/v3/orgs/members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson(
"GET", self.url + "/members/" + member._identity
)
if status == 302:
status, headers, data = self._requester.requestJson(
"GET", headers["location"]
)
return status == 204
def has_in_public_members(self, public_member):
"""
:calls: `GET /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
status, headers, data = self._requester.requestJson(
"GET", self.url + "/public_members/" + public_member._identity
)
return status == 204
def remove_from_membership(self, member):
"""
:calls: `DELETE /orgs/:org/memberships/:user <https://developer.github.com/v3/orgs/members/#remove-organization-membership>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/memberships/" + member._identity
)
def remove_from_members(self, member):
"""
:calls: `DELETE /orgs/:org/members/:user <http://developer.github.com/v3/orgs/members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/members/" + member._identity
)
def remove_from_public_members(self, public_member):
"""
:calls: `DELETE /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/public_members/" + public_member._identity
)
def create_migration(
self,
repos,
lock_repositories=github.GithubObject.NotSet,
exclude_attachments=github.GithubObject.NotSet,
):
"""
:calls: `POST /orgs/:org/migrations <https://developer.github.com/v3/migrations/users>`_
:param repos: list or tuple of str
:param lock_repositories: bool
:param exclude_attachments: bool
:rtype: :class:`github.Migration.Migration`
"""
assert isinstance(repos, (list, tuple)), repos
assert all(isinstance(repo, str) for repo in repos), repos
assert lock_repositories is github.GithubObject.NotSet or isinstance(
lock_repositories, bool
), lock_repositories
assert exclude_attachments is github.GithubObject.NotSet or isinstance(
exclude_attachments, bool
), exclude_attachments
post_parameters = {"repositories": repos}
if lock_repositories is not github.GithubObject.NotSet:
post_parameters["lock_repositories"] = lock_repositories
if exclude_attachments is not github.GithubObject.NotSet:
post_parameters["exclude_attachments"] = exclude_attachments
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/orgs/" + self.login + "/migrations",
input=post_parameters,
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
return github.Migration.Migration(
self._requester, headers, data, completed=True
)
def get_migrations(self):
"""
:calls: `GET /orgs/:org/migrations <https://developer.github.com/v3/migrations/users>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Migration.Migration`
"""
return github.PaginatedList.PaginatedList(
github.Migration.Migration,
self._requester,
"/orgs/" + self.login + "/migrations",
None,
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
def _initAttributes(self):
self._two_factor_requirement_enabled = github.GithubObject.NotSet
self._avatar_url = github.GithubObject.NotSet
self._billing_email = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._members_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_members_url = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "billing_email" in attributes: # pragma no branch
self._billing_email = self._makeStringAttribute(attributes["billing_email"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(
attributes["owned_private_repos"]
)
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_members_url" in attributes: # pragma no branch
self._public_members_url = self._makeStringAttribute(
attributes["public_members_url"]
)
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(
attributes["total_private_repos"]
)
if "two_factor_requirement_enabled" in attributes: # pragma no branch
self._two_factor_requirement_enabled = self._makeBoolAttribute(
attributes["two_factor_requirement_enabled"]
)
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| lgpl-3.0 | 4,850,455,243,568,008,000 | 39.067839 | 137 | 0.600385 | false |
swift-lang/swift-e-lab | parsl/configs/osg_ipp_multinode.py | 1 | 1298 | from parsl.executors.ipp_controller import Controller
from parsl.channels.ssh.ssh import SSHChannel
from parsl.providers.condor.condor import Condor
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
# This is an example config, make sure to
# replace the specific values below with the literal values
# (e.g., 'USERNAME' -> 'your_username')
config = Config(
executors=[
IPyParallelExecutor(
label='osg_remote_ipp',
provider=Condor(
channel=SSHChannel(
hostname='login.osgconnect.net',
username='USERNAME', # Please replace USERNAME with your username
script_dir='/home/USERNAME/parsl_scripts', # Please replace USERNAME with your username
),
nodes_per_block=1,
init_blocks=4,
max_blocks=4,
scheduler_options='Requirements = OSGVO_OS_STRING == "RHEL 6" && Arch == "X86_64" && HAS_MODULES == True',
worker_init='', # Input your worker_init if needed
walltime="01:00:00"
),
controller=Controller(public_ip='PUBLIC_IP'), # Please replace PUBLIC_IP with your public ip
)
],
)
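# A minimal usage sketch (an assumption, not part of the original config file):
# a config object like the one above is typically activated before any apps are
# run, e.g. with something like:
#   import parsl
#   parsl.load(config)
# after which Parsl apps submitted from this process execute on the OSG
# resources described by the Condor provider above.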
| apache-2.0 | 1,564,827,968,648,172,500 | 40.870968 | 123 | 0.59168 | false |
ianawilson/inspecttp | inspecttp/settings.py | 1 | 5359 | # Django settings for inspecttp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
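# For example (a sketch only, not part of the original settings), a sqlite3
# setup might look like the following, where 'inspecttp.db' is a hypothetical
# database file name:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'inspecttp.db',
#     }
# }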
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'rp4e3=*8l)wmk!zow0y69tl)rylq3q*xs!k8^-(#mt9hc@m9e!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'inspecttp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'inspecttp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | 2,355,210,286,938,402,300 | 33.352564 | 127 | 0.686509 | false |
JohnDoee/txasgiresource | txasgiresource/tests/test_http.py | 1 | 6939 | import os
import shutil
import tempfile
from twisted.internet import defer
from twisted.python import failure
from twisted.trial.unittest import TestCase
from .. import http as asgihttp
from ..http import ASGIHTTPResource
from ..utils import sleep
from .utils import DummyApplication, DummyRequest
class TestASGIHTTP(TestCase):
def setUp(self):
self.application = DummyApplication()
self.base_scope = {"_ssl": "", "path": "/"}
self._prepare_request()
self.temp_path = tempfile.mkdtemp()
def _prepare_request(self):
self.request = DummyRequest([b"test", b"path"])
self.request.uri = b"http://dummy/test/path?a=b"
self.request_finished_defer = self.request.notifyFinish()
self.resource = ASGIHTTPResource(
self.application, self.base_scope, 1, use_x_sendfile=True
)
def tearDown(self):
shutil.rmtree(self.temp_path)
@defer.inlineCallbacks
def test_normal_http_request(self):
self.resource.render(self.request)
self.assertEqual(
self.application.scope,
{
"type": "http",
"scheme": "http",
"http_version": "1.0",
"method": "GET",
"path": "/",
},
)
self.assertEqual(
self.application.queue.get_nowait(),
{"type": "http.request", "body": b"", "more_body": False},
)
self.resource.handle_reply(
{
"type": "http.response.start",
"status": 200,
"headers": [
[b"server", b"my server software"],
[b"x-isgood", b"yes"],
[b"x-isgood", b"no"],
],
}
)
self.resource.handle_reply(
{"type": "http.response.body", "body": b"this is the result"}
)
yield self.request_finished_defer
expected_headers = [
(b"X-Isgood", [b"yes", b"no"]),
(b"Server", [b"my server software"]),
]
for header in list(self.request.responseHeaders.getAllRawHeaders()):
expected_headers.remove(header)
self.assertEqual(expected_headers, [])
self.assertEqual(self.request.written[0], b"this is the result")
self.assertEqual(self.request.responseCode, 200)
@defer.inlineCallbacks
def test_timeout(self):
self.resource.render(self.request)
yield sleep(1.1)[0]
yield self.request_finished_defer
self.assertIn(b"Timeout", self.request.written[0])
self.assertEqual(self.request.responseCode, 504)
@defer.inlineCallbacks
def test_cancel_defer(self):
self.resource.render(self.request)
self.resource.reply_defer.cancel()
yield self.request_finished_defer
self.assertIn(b"cancelled", self.request.written[0])
self.assertEqual(self.request.responseCode, 503)
@defer.inlineCallbacks
def test_http_reply_chunked_body(self):
body = os.urandom(asgihttp.MAXIMUM_CONTENT_SIZE * 2 + 50)
self.request.content.write(body)
self.request.content.seek(0, 0)
self.resource.render(self.request)
self.assertEqual(
self.application.queue.get_nowait(),
{
"type": "http.request",
"body": body[: asgihttp.MAXIMUM_CONTENT_SIZE],
"more_body": True,
},
)
self.assertEqual(
self.application.queue.get_nowait(),
{
"type": "http.request",
"body": body[
asgihttp.MAXIMUM_CONTENT_SIZE : asgihttp.MAXIMUM_CONTENT_SIZE * 2
],
"more_body": True,
},
)
self.assertEqual(
self.application.queue.get_nowait(),
{
"type": "http.request",
"body": body[asgihttp.MAXIMUM_CONTENT_SIZE * 2 :],
"more_body": False,
},
)
self.resource.reply_defer.cancel()
try:
yield self.request_finished_defer
except:
pass
@defer.inlineCallbacks
def test_http_request_connection_lost(self):
self.resource.render(self.request)
self.request.processingFailed(failure.Failure(Exception()))
try:
yield self.request_finished_defer
except:
pass
else:
self.fail("Should raise an exception")
self.assertEqual(
self.application.queue.get_nowait(),
{"type": "http.request", "body": b"", "more_body": False},
)
self.assertEqual(
self.application.queue.get_nowait(), {"type": "http.disconnect"}
)
@defer.inlineCallbacks
def test_http_request_sendfile(self):
temp_file = os.path.join(self.temp_path, "tempfile")
file_payload = b"a" * 50
with open(temp_file, "wb") as f:
f.write(file_payload)
# normal request
self.resource.render(self.request)
self.resource.handle_reply(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"x-sendfile", temp_file.encode("utf-8")]],
}
)
self.resource.handle_reply({"type": "http.response.body", "body": b""})
yield self.request_finished_defer
self.assertEqual(self.request.responseCode, 200)
self.assertEqual(self.request.written[0], file_payload)
# cached request
etag = self.request.responseHeaders.getRawHeaders("etag")
self._prepare_request()
self.request.requestHeaders.addRawHeader(b"if-none-match", etag[0])
self.resource.render(self.request)
self.resource.handle_reply(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"x-sendfile", temp_file.encode("utf-8")]],
}
)
self.resource.handle_reply({"type": "http.response.body", "body": b""})
yield self.request_finished_defer
self.assertEqual(self.request.responseCode, 304)
if len(self.request.written) > 0 and self.request.written[0] != b"":
self.fail("Unexpected data written")
# file gone request
self._prepare_request()
os.remove(temp_file)
self.resource.render(self.request)
self.resource.handle_reply(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"x-sendfile", temp_file.encode("utf-8")]],
}
)
self.resource.handle_reply({"type": "http.response.body", "body": b""})
yield self.request_finished_defer
self.assertEqual(self.request.responseCode, 404)
| mit | 3,870,830,473,698,091,000 | 30.116592 | 85 | 0.550079 | false |
unt-libraries/coda | coda/coda_validate/views.py | 1 | 18393 | import json
import random
import datetime
from codalib import APP_AUTHOR
from codalib.bagatom import wrapAtom, makeObjectFeed
from dateutil import parser
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.paginator import Paginator
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import get_object_or_404, render
from django.utils.feedgenerator import Atom1Feed
from lxml import etree
from django.views.generic import ListView
from .models import Validate
XML_HEADER = b"<?xml version=\"1.0\"?>\n%s"
class CorrectMimeTypeFeed(Atom1Feed):
mime_type = 'application/xml'
class AtomNextNewsFeed(Feed):
"""
next view.
an atom pub representation of the next validation to occur.
should be a single item.
"""
feed_type = Atom1Feed
link = "/validate/next/"
title = "UNT Coda Validate App"
subtitle = "The highest priority validation item"
reason = 'None'
author_name = APP_AUTHOR.get('name', None)
author_link = APP_AUTHOR.get('uri', None)
feed_type = CorrectMimeTypeFeed
def get_object(self, request, server):
if server:
return server
else:
return None
def items(self, obj):
# need to filter by server first, if provided
reason = ''
if obj:
validations = Validate.objects.all().filter(server=obj)
reason = 'This selection was filtered to only consider \
server %s. ' % obj
else:
validations = Validate.objects.all()
# next check if we have any with a priority above 0
v = validations.filter(
priority__gt=0).order_by('priority_change_date')
if v.exists():
reason += 'Item was chosen because it is the \
oldest prioritized.'
# if set is empty, go with any priority with last_verified older than
# settings.VALIDATION_PERIOD
else:
# It might seem natural to use django's built-in random ordering,
# but that technique becomes slow when using large sets
# because 'order by ?' is very expensive against MySQL dbs.
# v = Validate.objects.all().filter(
# last_verified__gte=datetime.datetime.now() -
# settings.VALIDATION_PERIOD
# ).order_by('?')
# instead, let's do this:
# http://elpenia.wordpress.com/2010/05/11/getting-random-objects-from-a-queryset-in-django/
now = datetime.datetime.now()
v = validations.filter(
last_verified__lte=now - settings.VALIDATION_PERIOD
)
if v.exists():
random_slice = int(random.random() * v.count())
v = v[random_slice:]
reason += 'Item was randomly selected and within the \
past year because there is no prioritized record.'
# if that set has no objects, pick the oldest verified item.
else:
v = validations.order_by('last_verified')
reason += 'Item was chosen because there \
is no prioritized record and it had not been validated in the longest \
duration of time.'
self.reason = reason
return v[:1]
def item_title(self, item):
return item.identifier
def item_description(self, item):
return self.reason
def item_link(self, item):
return '/APP/validate/%s/' % item.identifier
# for some reason, I couldn't get AtomNextFeed to work without a server
# I don't think optional arguments are supported for class-based syndication
# feeds, so I have this work around to make it work.
class AtomNextFeedNoServer(AtomNextNewsFeed):
def get_object(self, request):
pass
def index(request):
context = {
'recently_prioritized': Validate.objects.filter(
priority__gt=0).order_by('-priority_change_date')[:20],
'recently_verified': Validate.objects.all().order_by('-last_verified')[:20],
'verified_counts': Validate.objects.last_verified_status_counts()
}
return render(request, 'coda_validate/index.html', context)
def last_day_of_month(year, month):
""" Work out the last day of the month """
last_days = [31, 30, 29, 28, 27]
for i in last_days:
try:
end = datetime.datetime(year, month, i)
except ValueError:
continue
else:
return end.day
return None
def stats(request):
"""
stats page
"""
if not Validate.objects.exists():
return render(
request,
'coda_validate/stats.html',
{
'sums_by_date': {},
'validations': None,
'this_month': None,
'last_24h': None,
'last_vp': None,
'unverified': 0,
'passed': 0,
'failed': 0,
'validation_period': '%s days' % str(
settings.VALIDATION_PERIOD.days
),
}
)
# resolve the range for last month filter
today = datetime.date.today()
first = datetime.date(day=1, month=today.month, year=today.year)
last_day = last_day_of_month(first.year, first.month)
this_month_range = [
'%s-%s-01 00:00:00' % (first.year, first.month),
'%s-%s-%s 23:59:59' % (first.year, first.month, last_day),
]
# resolve the range for last 24 hours filter
now = datetime.datetime.now()
twenty_four_hours_ago = now - datetime.timedelta(hours=24)
since_validation_period = now - datetime.timedelta(
days=settings.VALIDATION_PERIOD.days)
# make a set of data that makes sense for the heatmap
result_counts = Validate.objects.last_verified_status_counts()
total = sum(result_counts.values())
sums_by_date = Validate.sums_by_date()
sums_by_date_g = {}
years = set()
for dt, ct in sums_by_date.items():
y, m, d = dt
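        # the month is shifted to 0-based below, presumably (an assumption, not
        # stated in the original) because the heatmap's JavaScript Date handling
        # in the template expects 0-based months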
dt = (y, m - 1, d)
sums_by_date_g[dt] = ct
years.add(y)
sums_by_date = sums_by_date_g
num_years = len(years)
return render(
request,
'coda_validate/stats.html',
{
'sums_by_date': dict((('%d, %d, %d' % s, c)
for s, c in sums_by_date.items())),
'num_years': num_years,
'validations': total,
'this_month': Validate.objects.filter(
last_verified__range=this_month_range).count(),
'last_24h': Validate.objects.filter(
last_verified__range=[twenty_four_hours_ago, now]).count(),
'last_vp': Validate.objects.filter(
last_verified__range=[since_validation_period, now]).count(),
'unverified': result_counts.get('Unverified'),
'passed': result_counts.get('Passed'),
'failed': result_counts.get('Failed'),
'validation_period': '%s days' % str(settings.VALIDATION_PERIOD.days),
}
)
def prioritize(request):
"""
prioritize view
"""
identifier = request.GET.get('identifier')
prioritized = False
if identifier:
v = get_object_or_404(Validate, identifier=identifier)
v.priority = 1
v.priority_change_date = datetime.datetime.now()
v.save()
prioritized = True
return render(
request,
'coda_validate/prioritize.html',
{
'identifier': identifier,
'prioritized': prioritized,
}
)
def validate(request, identifier):
"""
prioritize view
"""
# this view always gets an identifier, if it's wrong, 404
v = get_object_or_404(Validate, identifier=identifier)
# clicked priority button on validate detail page
p = request.GET.get('priority')
if p == '1':
v.priority = 1
v.priority_change_date = datetime.datetime.now()
v.save()
return render(
request,
'coda_validate/validate.html',
{
'validate': v,
}
)
def prioritize_json(request):
"""
prioritize json view
"""
DOMAIN = Site.objects.get_current().domain
identifier = request.GET.get('identifier')
json_dict = {}
json_dict['status'] = 'failure'
status = 404
if identifier:
json_dict['requested_identifier'] = identifier
try:
v = Validate.objects.get(identifier=identifier)
except Exception:
v = None
if v:
v.priority = 1
v.priority_change_date = datetime.datetime.now()
v.save()
json_dict['status'] = 'success'
json_dict['priority'] = v.priority
json_dict['priority_change_date'] = str(v.priority_change_date)
json_dict['atom_pub_url'] = '%s/APP/validate/%s' % \
(DOMAIN, v.identifier)
status = 200
else:
json_dict['response'] = 'identifier was not found'
json_dict['requested_identifier'] = identifier
else:
json_dict['response'] = 'missing identifier parameter'
json_dict['requested_identifier'] = ''
status = 400
response = HttpResponse(content_type='application/json', status=status)
json.dump(
json_dict,
fp=response,
indent=4,
sort_keys=True,
)
return response
def validateToXML(validateObject):
"""
This is the reverse of xmlToValidateObject.
Given a "Validate" object, it generates an
XML object representative of such.
"""
# define namespace
validate_namespace = "http://digital2.library.unt.edu/coda/validatexml/"
val = "{%s}" % validate_namespace
validate_nsmap = {"validate": validate_namespace}
# build xml from object and return
XML = etree.Element("{0}validate".format(val), nsmap=validate_nsmap)
label = etree.SubElement(XML, "{0}identifier".format(val))
label.text = validateObject.identifier
last_verified = etree.SubElement(XML, "{0}last_verified".format(val))
last_verified.text = validateObject.last_verified.isoformat()
last_verified_status = etree.SubElement(XML, "{0}last_verified_status".format(val))
last_verified_status.text = validateObject.last_verified_status
priority_change_date = etree.SubElement(XML, "{0}priority_change_date".format(val))
priority_change_date.text = validateObject.priority_change_date.isoformat()
priority = etree.SubElement(XML, "{0}priority".format(val))
priority.text = str(validateObject.priority)
server = etree.SubElement(XML, "{0}server".format(val))
server.text = validateObject.server
return XML
def xmlToValidateObject(validateXML):
"""
Parse the XML in a POST request and create the validate object
"""
entryRoot = etree.XML(validateXML)
if entryRoot is None:
raise ValueError("Unable to parse uploaded XML")
# parse XML
contentElement = entryRoot.xpath("*[local-name() = 'content']")[0]
validateXML = contentElement.xpath("*[local-name() = 'validate']")[0]
identifier = validateXML.xpath(
"*[local-name() = 'identifier']")[0].text.strip()
last_verified = validateXML.xpath(
"*[local-name() = 'last_verified']")[0].text.strip()
last_verified = parser.parse(last_verified)
last_verified_status = validateXML.xpath(
"*[local-name() = 'last_verified_status']")[0].text.strip()
priority_change_date = validateXML.xpath(
"*[local-name() = 'priority_change_date']")[0].text.strip()
priority_change_date = parser.parse(priority_change_date)
priority = validateXML.xpath(
"*[local-name() = 'priority']")[0].text.strip()
server = validateXML.xpath("*[local-name() = 'server']")[0].text.strip()
# make the object and return
validate = Validate(
identifier=identifier,
last_verified=last_verified,
last_verified_status=last_verified_status,
priority_change_date=priority_change_date,
priority=priority,
server=server,
)
return validate
def xmlToUpdateValidateObject(validateXML):
"""
Parse the XML in a PUT request and adjust the validate based on that
*ONLY MODIFIES 'last_verified_status'*
"""
entryRoot = etree.XML(validateXML)
if entryRoot is None:
raise ValueError("Unable to parse uploaded XML")
# parse XML
contentElement = entryRoot.xpath("*[local-name() = 'content']")[0]
validateXML = contentElement.xpath("*[local-name() = 'validate']")[0]
identifier = validateXML.xpath(
"*[local-name() = 'identifier']")[0].text.strip()
last_verified_status = validateXML.xpath(
"*[local-name() = 'last_verified_status']")[0].text.strip()
# get the object (or 404) and return to the APP view to finish up.
validate = get_object_or_404(Validate, identifier=identifier)
validate.last_verified_status = last_verified_status
validate.last_verified = datetime.datetime.now()
validate.priority = 0
validate.save()
return validate
def app_validate(request, identifier=None):
"""
This method handles the ATOMpub protocol for validate objects
"""
# are we POSTing a new identifier here?
if request.method == 'POST' and not identifier:
# to object
validateObject = xmlToValidateObject(request.body)
validateObject.save()
# and back to xml
validateObjectXML = validateToXML(validateObject)
atomXML = wrapAtom(
xml=validateObjectXML,
id='http://%s/APP/validate/%s/' % (
request.META['HTTP_HOST'], validateObject.identifier
),
title=validateObject.identifier,
)
atomText = XML_HEADER % etree.tostring(atomXML, pretty_print=True)
resp = HttpResponse(atomText, content_type="application/atom+xml")
resp.status_code = 201
resp['Location'] = 'http://%s/APP/validate/%s/' % \
(request.META['HTTP_HOST'], validateObject.identifier)
elif request.method == 'HEAD':
resp = HttpResponse(content_type="application/atom+xml")
resp.status_code = 200
# if not, return a feed
elif request.method == 'GET' and not identifier:
# negotiate the details of our feed here
validates = Validate.objects.all()
page = int(request.GET['page']) if request.GET.get('page') else 1
atomFeed = makeObjectFeed(
paginator=Paginator(validates, 20),
objectToXMLFunction=validateToXML,
feedId=request.path[1:],
webRoot='http://%s' % request.META.get('HTTP_HOST'),
title="validate Entry Feed",
idAttr="identifier",
nameAttr="identifier",
dateAttr="added",
request=request,
page=page,
author={
"name": APP_AUTHOR.get('name', None),
"uri": APP_AUTHOR.get('uri', None)
},
)
atomFeedText = XML_HEADER % etree.tostring(atomFeed, pretty_print=True)
resp = HttpResponse(atomFeedText, content_type="application/atom+xml")
resp.status_code = 200
# updating an existing record
elif request.method == 'PUT' and identifier:
returnValidate = xmlToUpdateValidateObject(request.body)
validateObjectXML = validateToXML(returnValidate)
atomXML = wrapAtom(
xml=validateObjectXML,
id='http://%s/APP/validate/%s/' % (
request.META['HTTP_HOST'], identifier
),
title=identifier,
)
atomText = XML_HEADER % etree.tostring(atomXML, pretty_print=True)
resp = HttpResponse(atomText, content_type="application/atom+xml")
resp.status_code = 200
elif request.method == 'GET' and identifier:
# attempt to retrieve record -- error if unable
try:
validate_object = Validate.objects.get(identifier=identifier)
except Validate.DoesNotExist:
return HttpResponseNotFound(
"There is no validate for identifier %s.\n" % identifier
)
returnValidate = validate_object
validateObjectXML = validateToXML(returnValidate)
atomXML = wrapAtom(
xml=validateObjectXML,
id='http://%s/APP/validate/%s/' % (
request.META['HTTP_HOST'], identifier
),
title=identifier,
author=APP_AUTHOR.get('name', None),
author_uri=APP_AUTHOR.get('uri', None)
)
atomText = XML_HEADER % etree.tostring(atomXML, pretty_print=True)
resp = HttpResponse(atomText, content_type="application/atom+xml")
resp.status_code = 200
elif request.method == 'DELETE' and identifier:
# attempt to retrieve record -- error if unable
try:
validate_object = Validate.objects.get(identifier=identifier)
except:
return HttpResponseNotFound(
"Unable to Delete. There is no identifier %s.\n" % identifier)
# grab the validate, delete it, and inform the user.
returnValidate = validate_object
validateObjectXML = validateToXML(returnValidate)
validate_object.delete()
atomXML = wrapAtom(
xml=validateObjectXML,
id='http://%s/APP/validate/%s/' % (
request.META['HTTP_HOST'], identifier
),
title=identifier,
)
atomText = XML_HEADER % etree.tostring(atomXML, pretty_print=True)
resp = HttpResponse(atomText, content_type="application/atom+xml")
resp.status_code = 200
return resp
def check_json(request):
counts = Validate.objects.last_verified_status_counts()
return HttpResponse(json.dumps(counts), content_type='application/json')
class ValidateListView(ListView):
model = Validate
template_name = 'coda_validate/list.html'
context_object_name = 'validation_list'
paginate_by = 20
def get_queryset(self):
queryset = super(ValidateListView, self).get_queryset()
status = self.request.GET.get('status')
if status:
queryset = queryset.filter(last_verified_status=status)
return queryset
| bsd-3-clause | -3,355,197,747,545,127,400 | 33.835227 | 103 | 0.608547 | false |
lgrahl/scripthookvpy3k | python/gta/ui/primitive.py | 1 | 3310 | """
Primitive UI elements.
"""
import gta_native
from gta import Font
from gta.ui import Item, Point, Color
__all__ = ('Rectangle', 'Label')
class Rectangle(Item):
def draw(self, offset=Point.Zero, **settings):
# TODO: Remove logging
from gta import utils
logger = utils.get_logger('gta.RECTANGLE')
# Override default settings
settings.update(self._settings)
# Calculate position and dimension
x, y = self.get_coordinates(offset)
width, height = self.get_dimension()
text_scale = 0.4
line_width = 350.0
line_height = 15.0
line_top = 18.0
line_left = 0.0
text_left = 5.0
line_width_scaled = line_width / 1280
line_top_scaled = line_top / 720
text_left_scaled = text_left / 1280
line_height_scaled = line_height / 720
line_left_scaled = line_left / 1280
# Use native functions to draw
logger.warning('x: {}, y: {}, width: {}, height: {}, color: {}',
x, y, width, height, self.color.rgba)
logger.warning('gta_native.graphics.get_screen_resolution()')
gta_native.graphics.get_screen_resolution()
logger.info('Screen resolution: {}', gta_native.graphics.get_screen_resolution())
logger.warning('gta_native.ui.set_text_font(Font.chalet_london)')
gta_native.ui.set_text_font(Font.chalet_london)
logger.warning('gta_native.ui.set_text_scale(0.0, 0.35)')
gta_native.ui.set_text_scale(0.0, text_scale)
logger.warning('gta_native.ui.set_text_colour(*Color.White.rgba)')
gta_native.ui.set_text_colour(*Color.White.rgba)
logger.warning('gta_native.ui.set_text_centre(True)')
gta_native.ui.set_text_centre(False)
logger.warning('gta_native.ui.set_text_dropshadow(0, 0, 0, 0, 0)')
gta_native.ui.set_text_dropshadow(0, 0, 0, 0, 0)
logger.warning('gta_native.ui.set_text_edge(0, 0, 0, 0, 0)')
gta_native.ui.set_text_edge(0, 0, 0, 0, 0)
logger.warning('gta_native.ui._SET_TEXT_ENTRY(\'STRING\')')
gta_native.ui._SET_TEXT_ENTRY('STRING')
logger.warning('gta_native.ui._ADD_TEXT_COMPONENT_STRING(\'TEST\')')
gta_native.ui._ADD_TEXT_COMPONENT_STRING('TEST')
logger.warning('gta_native.ui._DRAW_TEXT(text_left_scaled, no_idea_1)')
no_idea_1 = line_top_scaled + 0.00278 + line_height_scaled - 0.005
logger.info('text_left_scaled={}, no_idea_1={}', text_left_scaled, no_idea_1)
gta_native.ui._DRAW_TEXT(text_left_scaled, no_idea_1)
no_idea_2 = gta_native.ui._0xDB88A37483346780(text_scale, 0)
logger.info('line_left_scaled={}, line_top_scaled + 0.00278={}, line_width_scaled={}, no_idea_2 + line_height_scaled*2.0 + 0.005={}, *self.color.rgba={}',
line_left_scaled, line_top_scaled + 0.00278, line_width_scaled, no_idea_2 + line_height_scaled*2.0 + 0.005, *self.color.rgba)
gta_native.graphics.draw_rect(
line_left_scaled,
line_top_scaled + 0.00278,
line_width_scaled,
no_idea_2 + line_height_scaled*2.0 + 0.005,
*self.color.rgba
)
# gta_native.graphics.draw_rect(x, y, width, height, *self.color.rgba)
class Label(Item):
pass
| mit | 6,848,006,543,339,703,000 | 40.898734 | 162 | 0.607855 | false |
fupadev/FuME | fume/ui/filter.py | 1 | 2752 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'filter.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_filter(object):
def setupUi(self, filter):
filter.setObjectName("filter")
filter.resize(381, 435)
self.gridLayout = QtWidgets.QGridLayout(filter)
self.gridLayout.setObjectName("gridLayout")
self.buttonBox = QtWidgets.QDialogButtonBox(filter)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 3, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(filter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.listWidget = QtWidgets.QListWidget(filter)
self.listWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.listWidget.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.listWidget.setObjectName("listWidget")
self.gridLayout.addWidget(self.listWidget, 2, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(filter)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtWidgets.QLineEdit(filter)
self.lineEdit.setInputMask("")
self.lineEdit.setPlaceholderText("")
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.checkBox = QtWidgets.QCheckBox(filter)
self.checkBox.setObjectName("checkBox")
self.horizontalLayout.addWidget(self.checkBox)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.label_2.raise_()
self.listWidget.raise_()
self.buttonBox.raise_()
self.label.raise_()
self.retranslateUi(filter)
self.buttonBox.accepted.connect(filter.accept)
self.buttonBox.rejected.connect(filter.reject)
QtCore.QMetaObject.connectSlotsByName(filter)
def retranslateUi(self, filter):
_translate = QtCore.QCoreApplication.translate
filter.setWindowTitle(_translate("filter", "Filter bearbeiten"))
self.label_2.setText(_translate("filter", "Markiere alle Mannschaften, die hinzugefügt werden sollen"))
self.label.setText(_translate("filter", "Suche:"))
self.checkBox.setText(_translate("filter", "markiert"))
| gpl-3.0 | -4,410,328,212,400,600,000 | 44.85 | 111 | 0.700109 | false |
google/flax | flax/core/variables.py | 1 | 1558 | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A variable dict is a normal Python dictionary, which is a container for one
or more "variable collections", each of which are nested dictionaries whose
leaves are ``jax.numpy`` arrays.
The different variable collections share the same nested tree structure.
For example, consider the following variable dictionary::
{
"params": {
"Conv1": { "weight": ..., "bias": ... },
"BatchNorm1": { "scale": ..., "mean": ... },
"Conv2": {...}
},
"batch_stats": {
"BatchNorm1": { "moving_mean": ..., "moving_average": ...}
}
}
In this case, the ``"BatchNorm1"`` key lives in both the ``"params"`` and
``"batch_stats"`` collections. This reflects the fact that the submodule
named ``"BatchNorm1"`` has both trainable parameters (the ``"params"`` collection),
as well as other non-trainable variables (the ``"batch_stats"`` collection).
TODO: Make "variable dict" design note, and link to it from here.
"""
from .scope import Variable
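# A minimal sketch (an assumption, not part of the original module) of how the
# collections in the docstring example above might be read, assuming such a
# dict is bound to `variables`:
#   conv1_kernel = variables['params']['Conv1']['weight']
#   bn1_stats = variables['batch_stats']['BatchNorm1']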
| apache-2.0 | -3,274,223,196,165,504,000 | 36.095238 | 85 | 0.693838 | false |
rodrigolucianocosta/ProjectParking | ProjectParking/Parking/django-localflavor-1.1/docs/conf.py | 1 | 10708 | # -*- coding: utf-8 -*-
#
# django-localflavor documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 2 17:56:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('extensions'))
sys.path.insert(0, os.path.abspath('..'))
import django
django.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode', 'promises', 'settings']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-localflavor'
copyright = u'Django Software Foundation and individual contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
from localflavor import __version__
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
except ImportError:
version = release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-localflavordoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-localflavor.tex', u'django-localflavor Documentation',
u'Django Software Foundation and individual contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-localflavor', u'django-localflavor Documentation',
[u'Django Software Foundation and individual contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-localflavor', u'django-localflavor Documentation',
u'Django Software Foundation and individual contributors', 'django-localflavor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'django-localflavor'
epub_author = u'Django Software Foundation and individual contributors'
epub_publisher = u'Django Software Foundation and individual contributors'
epub_copyright = u'Django Software Foundation and individual contributors'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'django': ('http://docs.djangoproject.com/en/dev/',
'http://docs.djangoproject.com/en/dev/_objects/'),
}
| mpl-2.0 | -4,950,238,410,156,170,000 | 31.646341 | 119 | 0.707508 | false |
meraki-analytics/cassiopeia-datastores | cassiopeia-diskstore/cassiopeia_diskstore/patch.py | 1 | 1306 | from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, DataSink, PipelineContext
from cassiopeia.dto.patch import PatchListDto
from .common import SimpleKVDiskService
T = TypeVar("T")
class PatchDiskService(SimpleKVDiskService):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
@DataSink.dispatch
def put(self, type: Type[T], item: T, context: PipelineContext = None) -> None:
pass
@DataSink.dispatch
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
pass
@get.register(PatchListDto)
def get_patches(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> PatchListDto:
key = "{clsname}".format(clsname=PatchListDto.__name__)
return PatchListDto(self._get(key))
@put.register(PatchListDto)
def put_patches(self, item: PatchListDto, context: PipelineContext = None) -> None:
key = "{clsname}".format(clsname=PatchListDto.__name__)
self._put(key, item)
| mit | -4,634,316,932,255,871,000 | 34.297297 | 119 | 0.689127 | false |
rtfd/readthedocs.org | readthedocs/doc_builder/base.py | 1 | 4554 | """Base classes for Builders."""
import logging
import os
import shutil
from functools import wraps
from readthedocs.projects.models import Feature
log = logging.getLogger(__name__)
def restoring_chdir(fn):
# XXX:dc: This would be better off in a neutral module
@wraps(fn)
def decorator(*args, **kw):
try:
path = os.getcwd()
return fn(*args, **kw)
finally:
os.chdir(path)
return decorator
class BaseBuilder:
"""
The Base for all Builders. Defines the API for subclasses.
Expects subclasses to define ``old_artifact_path``, which points at the
directory where artifacts should be copied from.
"""
_force = False
ignore_patterns = []
old_artifact_path = None
def __init__(self, build_env, python_env, force=False):
self.build_env = build_env
self.python_env = python_env
self.version = build_env.version
self.project = build_env.project
self.config = python_env.config if python_env else None
self._force = force
self.project_path = self.project.checkout_path(self.version.slug)
self.target = self.project.artifact_path(
version=self.version.slug,
type_=self.type,
)
def get_final_doctype(self):
"""Some builders may have a different doctype at build time."""
return self.config.doctype
def force(self, **__):
"""An optional step to force a build even when nothing has changed."""
log.info('Forcing a build')
self._force = True
def append_conf(self):
"""Set custom configurations for this builder."""
pass
def build(self):
"""Do the actual building of the documentation."""
raise NotImplementedError
def move(self, **__):
"""Move the generated documentation to its artifact directory."""
if os.path.exists(self.old_artifact_path):
if os.path.exists(self.target):
shutil.rmtree(self.target)
log.info('Copying %s on the local filesystem', self.type)
log.debug('Ignoring patterns %s', self.ignore_patterns)
shutil.copytree(
self.old_artifact_path,
self.target,
ignore=shutil.ignore_patterns(*self.ignore_patterns),
)
else:
log.warning('Not moving docs, because the build dir is unknown.')
def clean(self, **__):
"""Clean the path where documentation will be built."""
if os.path.exists(self.old_artifact_path):
shutil.rmtree(self.old_artifact_path)
log.info('Removing old artifact path: %s', self.old_artifact_path)
def docs_dir(self, docs_dir=None, **__):
"""Handle creating a custom docs_dir if it doesn't exist."""
if docs_dir:
return docs_dir
for doc_dir_name in ['docs', 'doc', 'Doc', 'book']:
possible_path = os.path.join(self.project_path, doc_dir_name)
if os.path.exists(possible_path):
return possible_path
return self.project_path
def create_index(self, extension='md', **__):
"""Create an index file if it needs it."""
docs_dir = self.docs_dir()
index_filename = os.path.join(
docs_dir,
'index.{ext}'.format(ext=extension),
)
if not os.path.exists(index_filename):
readme_filename = os.path.join(
docs_dir,
'README.{ext}'.format(ext=extension),
)
if os.path.exists(readme_filename):
return 'README'
if not self.project.has_feature(Feature.DONT_CREATE_INDEX):
index_text = """
Welcome to Read the Docs
------------------------
This is an autogenerated index file.
Please create an ``index.{ext}`` or ``README.{ext}`` file with your own content
under the root (or ``/docs``) directory in your repository.
If you want to use another markup, choose a different builder in your settings.
Check out our `Getting Started Guide
<https://docs.readthedocs.io/en/latest/getting_started.html>`_ to become more
familiar with Read the Docs.
"""
with open(index_filename, 'w+') as index_file:
index_file.write(index_text.format(dir=docs_dir, ext=extension))
return 'index'
def run(self, *args, **kwargs):
"""Proxy run to build environment."""
return self.build_env.run(*args, **kwargs)
| mit | 4,921,347,170,352,560,000 | 30.625 | 84 | 0.593544 | false |
PhenixI/machine-learning | 2_supervised_regression/1-Linear Regression/ransacregression/ransacregression.py | 1 | 1734 | #load data Housing Dataset: https://archive.ics.uci.edu/ml/datasets/Housing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#df = pd.read_csv('https://archive.ics.uci.edu/ml/datasets/housing/Housing.data',header = None,sep = '\s+')
df = pd.read_csv('F:/developSamples/ml/housing.data',header = None,sep = '\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS','NOX', 'RM', 'AGE', 'DIS', 'RAD','TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()
X = df['RM'].values.reshape(-1,1)
y = df['MEDV'].values.reshape(-1,1)
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
#By setting the residual_threshold parameter to 5.0, we
#only allowed samples to be included in the inlier set if their vertical distance to the
#fitted line is within 5 distance units, which works well on this particular dataset.
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
residual_metric=lambda x: np.sum(np.abs(x), axis=1),
residual_threshold=5.0,
random_state=0)
ransac.fit(X,y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper left')
plt.show()
print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_) | gpl-2.0 | -2,341,679,451,319,219,000 | 43.487179 | 116 | 0.65917 | false |
Bmillidgework/Misc-Maths | Misc/rejection_sample.py | 1 | 7356 | # okay, time to test this out here
# in a simple python script
import numpy as np
import random
def rejection_sample(fn, envelope_fn, squeeze_fn, N):
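	# Basic rejection sampling sketch: draw a position from the envelope, draw a
	# uniform height under the envelope at that position, accept straight away if
	# the height falls under the (cheap) squeeze function, otherwise fall back to
	# evaluating the target density itself. `sample(envelope_fn)` is assumed to be
	# a helper (not defined in this file) that draws a position from the envelope.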
samples = []
num_accepted = 0
num_rejected = 0
for i in xrange(N):
position = sample(envelope_fn) # sample a position from the envelope function
height_multiplier = np.random.uniform(0,1) # sample a height multiplier from the random uniform - this is what generates the independent samlpes
		env_height = envelope_fn(position) # evaluate the envelope function at the point provided
squeeze_height = squeeze_fn(position)
sample_height = env_height * height_multiplier
		if sample_height <= squeeze_height:
			samples.append(position)
			num_accepted +=1
		if sample_height > squeeze_height:
			distribution_height = fn(position)
			if sample_height <= distribution_height:
				samples.append(position)
				num_accepted +=1
			if sample_height > distribution_height:
				num_rejected +=1
samples = np.array(samples)
return samples, num_accepted, num_rejected
# okay, let's try to implement this thing somehow. Not totally sure how but get the basic ideas
# so how are we going to represent these things? we're going to need to represent a bunch of piecewise functions, and a good datastructure to do that. We're also going to have to do the standard rejection sampling and chord test in a way that hopefully makes some kind of sense. Further, we're going to have to do beginning and initialisation and all sorts of stuff really
# and provide a function to calculate the density. we've got to work out how that's going to work too. so let's get on this
# the good thing is this ars method is really complicated and requires a huge amount of horrible calculations so it's no fun at all really and makes the GAN seem much more reasonable!
# let's start doing some truly horrible calculations, which I hate... dagnabbit! I'm not sure of good data structures for doing this... some kind of binary trees? for now let's assume we have a vector of abscissae
# we're also going to calculate finite differences. let's do that quickly
def finite_difference(fn, point, peturbation = 1e-6):
return (fn(point + peturbation) - fn(point))/peturbation
# okay, now let's get our points of abscissae
abscissae = []
hs = []
abscissae_derivatives = []
def get_hs(abscissae, fn):
hs =[]
	for i in xrange(len(abscissae)):
hs.append(fn(abscissae[i]))
hs = np.array(hs)
return hs
def calculate_abscissae_derivatives_and_hs(abscissae, fn):
hs= []
abscissae_derivatives = []
for i in xrange(len(abscissae)):
hs.append(fn(abscissae[i]))
abscissae_derivatives.append(finite_difference(fn, abscissae[i]))
abscissae_derivatives = np.array(abscissae_derivatives)
hs = np.array(hs)
return abscissae_derivatives, hs
def get_tangent_intersection_points(abscissae, hs, abscissae_derivatives):
	assert len(abscissae) == len(hs) == len(abscissae_derivatives), 'all points must have a corresponding derivative'
	zs = []
	for i in xrange(len(abscissae)-1):
		x = abscissae[i]
		xplus=abscissae[i+1]
		hx = hs[i]
		hdashx = abscissae_derivatives[i]
		hxplus = hs[i+1]
hdashxplus = abscissae_derivatives[i+1]
zs.append((hxplus - hx -(xplus*hdashxplus) + (x*hdashx))/(hdashx - hdashxplus))
zs = np.array(zs)
return zs
# I'm really not sure what datastructures we're going to use to figure this out. first let's actually try to sample a point and see what's going on
def get_piece_cumsum(hx, hdashx, z, zminus,x):
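	# Intended (an assumption from the surrounding ARS code, not stated by the
	# author) to be the integral of exp(h(x_j) + (t - x_j)*h'(x_j)) for t in
	# [zminus, z], i.e. exp(hx - x*hdashx)/hdashx * (exp(hdashx*z) - exp(hdashx*zminus)).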
	int1 = np.exp(hx - x*hdashx) / hdashx
int2 = np.exp(z*hdashx) - np.exp(zminus*hdashx)
return int1*int2
def get_cumsums(abscissae, abscissae_derivatives, zs,hs):
cumsums = []
total = 0
	# I think there should be one less z than xs so that's good, so let's assert that and then hope for the best
assert len(abscissae) == len(abscissae_derivatives) == len(hs) == len(zs) +1, 'lengths are incorrect'
for i in xrange(len(zs)):
		if i == 0:
			cumsum = get_piece_cumsum(hs[i], abscissae_derivatives[i], zs[i], 0, abscissae[i])
		else:
			cumsum = get_piece_cumsum(hs[i], abscissae_derivatives[i], zs[i], zs[i-1], abscissae[i])
		cumsums.append(cumsum)
		total += cumsum
cumsums = np.array(cumsums)
return cumsums, total
def get_index_upper(cumsums, total):
curr = 0
for i in xrange(len(cumsums)):
curr += cumsums[i]
if total <= curr:
diff = curr - total
return i,diff
#I don't think we should need this, as it should always return in the loop, I think
#if it goes through all of them
raise ValueError('total is greater than cumulative sum!')
#return len(cumsum)
def sample_single_piece(cum, hdashx, hx,x):
frac = (cum *hdashx)/(np.exp(hdashx) - (x*hdashx))
return (1/hdashx)*np.log((np.exp(hdashx) + frac))
def sample_upper(xs, hs, hdashes,zs):
u = np.random.uniform(0,1)
	# we need the cumulative sum now
cumsums, total = get_cumsums(xs, hdashes, zs, hs)
c = u*total
#now we need to get the index of the cumulative sum so we can calculate the region we need it in
i,diff = get_index_upper(cumsums, c)
	sample = sample_single_piece(diff, hdashes[i], hs[i], xs[i])
	return sample
# okay, now we can sample from that distribution what is the next step
# we assume the abscissae are sorted into ascending value
def get_nearest_abscissa_indices(value, abscissae):
for i in xrange(len(abscissae)):
		if value < abscissae[i]:
return i-1, i
raise ValueError('Provided value greater than domain of this distribution')
def get_nearest_zs_index(value, zs):
for i in xrange(len(zs)):
		if value < zs[i]:
return i-1,i
raise ValueError('Provided value greater than the domain of this distribution')
def get_lower_hull(value, xs, hs):
i, iplus = get_nearest_abscissa_indices(value, xs)
	return (((xs[iplus]-value)*hs[i]) + ((value-xs[i])*hs[iplus]))/(xs[iplus]-xs[i])
def get_upper_hull_value(value, xs,zs):
i, iplus = get_nearest_zs_index(value, zs)
	x = xs[i]
hx= hs[i]
hdashx = hdashxs[i]
return hx + (value - x)*hdashx
def add_new_point_to_hull(point):
pass
#this logic will be truly horrible. implement later
def initialise_ars():
pass
#also do this logic later
# now we should have all the helper functions we need to create the actual adaptive rejection sampler
def rejection_sample(fn,N):
samples = []
tries = []
n=0
num_acceptances = 0
num_rejections =0
while n < N:
xstar = sample_upper(xs, hs, hdashes, zs)
u = np.random.uniform(0,1)
        w = u
        #squeezing step!
        if w <= np.exp(get_lower_hull(xstar, xs, hs) - get_upper_hull_value(xstar, xs, hs, hdashes, zs)):
#we accept
samples.append(xstar)
tries.append(1)
n+=1
num_acceptances +=1
else:
            if w <= np.exp(fn(xstar) - get_upper_hull_value(xstar, xs, hs, hdashes, zs)):
samples.append(xstar)
tries.append(1)
add_new_point_to_hull(fn(xstar))
n+=1
num_acceptances +=1
else:
#we reject - dagnabbit!
tries.append(0)
num_rejections +=1
return samples, num_acceptances, num_rejections, tries
# so that's a very simple overview of the algorithm, which is cool and nice and wonderful.
# I mean it will never work as written, but we kind of understand it and get the complexity behind it, which is cool. Funnily enough it's the sampling from the upper hull which is the worst of the work.
# this is also kind of a huge overhead, so it's got to be worth it over additional function evaluations, but perhaps it is?
# I wonder if our GAN method can do better?
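# A minimal usage sketch (hypothetical, not from any library): rejection_sample() above reads
# the module-level names xs, hs, hdashes and zs, which are assumed to have been initialised
# first (initialise_ars() is still a stub), e.g. from a few starting abscissae of a
# log-concave log-density such as an unnormalised standard normal:
#
#   log_density = lambda x: -0.5*x*x
#   xs = np.array([-2.0, 0.5, 2.0])                              # starting abscissae
#   hdashes, hs = calculate_abscissae_derivatives_and_hs(xs, log_density)
#   zs = get_tangent_intersection_points(xs, hs, hdashes)
#   samples, n_accept, n_reject, tries = rejection_sample(log_density, 1000)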
| mit | -5,688,628,599,580,016,000 | 34.196172 | 371 | 0.717102 | false |
andytopham/podplayer | oled.py | 1 | 6071 | #!/usr/bin/python
''' Module to control the picaxe OLED.
Updated to work with both 16x2 and 20x4 versions.
Requires new picaxe fw that inverts serial polarity, i.e. N2400 -> T2400.
The oled modules work fine off the RPi 3v3, which avoids the need for level shifting.
Requires the installation of the python serial module. Install by:
sudo apt-get install python-serial
edit /boot/cmdline.txt to remove all refs to console=ttyAMA0... and kgdboc=ttyAMA0...
edit /etc/inittab to comment out the last line (T0:23...)
To get rid of the garbage from the pi bootup...
edit /boot/cmdline.txt and remove both references to ...ttyAMA0...
Brightness control: http://www.picaxeforum.co.uk/entry.php?49-Winstar-OLED-Brightness-Control
'''
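# Minimal usage sketch (hypothetical, not part of the original module): the Screen class
# below runs a background thread that drains its queue onto the display, so a caller
# would typically do something like
#   screen = Screen(rows=4)
#   screen.start()
#   screen.q.put([0, 'Hello world'])   # text for row 0
#   screen.Event.set()                 # ask the worker thread to exit when finished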
import serial
import subprocess, time, logging, datetime
import threading, Queue
LOGFILE = 'log/oled.log'
ROWLENGTH4 = 20
ROWLENGTH2 = 16
LAST_PROG_ROW4 = 2
LAST_PROG_ROW2 = 0
class Screen(threading.Thread):
''' Oled class. Routines for driving the serial oled. '''
def __init__(self, rows = 4):
self.Event = threading.Event()
# self.threadLock = threading.Lock()
threading.Thread.__init__(self, name='myoled')
self.q = Queue.Queue(maxsize=6)
self.logger = logging.getLogger(__name__)
self.set_rowcount(rows)
self.port = serial.Serial(
port='/dev/ttyAMA0',
baudrate=2400,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_TWO) # Note - not just one stop bit
#constants
self.rowselect = [128,192,148,212] # the addresses of the start of each row
self.startpt=0
self.initialise()
def set_rowcount(self, rows):
self.rowcount = rows
if rows == 4:
self.rowlength = ROWLENGTH4
self.last_prog_row = LAST_PROG_ROW4
else:
self.rowlength = ROWLENGTH2
self.last_prog_row = LAST_PROG_ROW2
def run(self):
print 'Starting oled queue manager.'
myevent = False
while not myevent:
while not self.q.empty():
entry = self.q.get()
self.writerow(entry[0], entry[1])
self.q.task_done()
myevent = self.Event.wait(.5) # wait for this timeout or the flag being set.
print 'Oled exiting'
def initialise(self):
# self.port.open()
self.logger.info("Opened serial port")
self.port.write(chr(254)) # cmd
self.port.write(chr(1)) # clear display
self.startpt = 0
self.writerow(0, ' ')
self.writerow(1, ' ')
self.writerow(2,' ')
self.writerow(3,' ')
return(0)
def info(self):
return(self.rowcount, self.rowlength)
def write_button_labels(self, next, stop):
        # These are the button labels. No labels with small display.
if next == True:
self.q.put([0,'Next '])
if stop == True:
self.q.put([0,'Stop '])
return(0)
def write_radio_extras(self, string1, temperature, chgvol_flag = False):
if chgvol_flag:
self.q.put([self.rowcount-1, string1])
else:
self.q.put([self.rowcount-1,'{0:5s}{1:7.1f}^C'.format(string1.ljust(self.rowlength-9),float(temperature))])
return(0)
def clear(self):
self.port.write(chr(254)) # cmd
self.port.write(chr(1)) # clear display
time.sleep(.5)
def writerow(self,row,string):
if row < self.rowcount:
self.port.write(chr(254)) # cmd
self.port.write(chr(self.rowselect[row])) # move to start of row
self.port.write(string[0:self.rowlength].ljust(self.rowlength))
def scroll(self,string):
if self.rowcount > 2:
self.writerow(1,string[0:20])
self.writerow(2,string[20:40].ljust(20)) # pad out the missing chars with spaces
self.writerow(3,string[40:60].ljust(20)) # pad out the missing chars with spaces
# pauseCycles=5
# self.startpt += 1
# string = string + ' ' # add a trailing blank to erase as we scroll
# if self.startpt > len(string): # finished scrolling this string, reset.
# self.startpt = 0
# if self.startpt < pauseCycles: # only start scrolling after 8 cycles.
# startpoint=0
# else:
# startpoint = self.startpt-pauseCycles
# if len(string[40:]) > 21 : # so it needs scrolling
# if False: # temporary to stop the scrolling
# print "String:",string[40:]
# print "Startpoint:",startpoint
# self.writerow(3,string[40+startpoint:60+startpoint])
# else:
# self.writerow(3,string[40:60].ljust(20)) # pad out the missing chars with spaces
else: # only 2 rows
pauseCycles=5
self.startpt += 1
string = string + ' ' # add a trailing blank to erase as we scroll
if self.startpt > len(string): # finished scrolling this string, reset.
self.startpt = 0
            if self.startpt < pauseCycles: # only start scrolling after pauseCycles cycles.
startpoint=0
else:
startpoint = self.startpt-pauseCycles
self.writerow(1,string[startpoint:startpoint+self.rowlength])
return(0)
def screensave(self):
while True:
for j in range(self.rowcount):
self.writerow(j,".")
for i in range(self.rowlength-1):
time.sleep(.5)
self.port.write(".")
for j in range(self.rowcount):
self.writerow(j," ")
for i in range(self.rowlength-1):
time.sleep(.5)
self.port.write(" ")
return(0)
def off(self):
self.port.write(chr(254)) # cmd
self.port.write(chr(8))
time.sleep(.2)
def on(self):
self.port.write(chr(254)) # cmd
self.port.write(chr(12))
time.sleep(.2)
if __name__ == "__main__":
print "Running oled class as a standalone app"
logging.basicConfig(filename= LOGFILE,
filemode='w',
level=logging.INFO) #filemode means that we do not append anymore
# Default level is warning, level=logging.INFO log lots, level=logging.DEBUG log everything
logging.warning(datetime.datetime.now().strftime('%d %b %H:%M')+". Running oled class as a standalone app")
myOled = Screen()
myOled.clear()
myOled.start()
myOled.writerow(0," OLED class ")
myOled.writerow(1,"Config size="+str(myOled.rowlength)+"x"+str(myOled.rowcount))
if myOled.rowcount > 2:
myOled.writerow(2,"01234567890123456789")
myOled.writerow(3,"Running oled.py ")
time.sleep(2)
myOled.Event.set()
print 'Ending oled main prog.'
| mit | -5,049,980,333,659,571,000 | 32.174863 | 112 | 0.672871 | false |
vrutkovs/atomic-reactor | atomic_reactor/plugins/pre_resolve_module_compose.py | 1 | 8023 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Takes a reference to a module, and looks up or triggers a compose in the on-demand compose
server (ODCS). In addition to retrieving the URL for a composed yum repository, the module
and all its dependencies are resolved to particular versions matching the ones that the
repository is built from.
Example configuration:
{
'name': 'resolve_module_compose',
'args': {'module_name': 'myapp',
'module_stream': 'f26',
'module_version': '20170629185228',
             'odcs_url': 'https://odcs.fedoraproject.org/odcs/1',
             'pdc_url': 'https://pdc.fedoraproject.org/rest_api/v1'}
}
"""
import os
import re
from modulemd import ModuleMetadata
from pdc_client import PDCClient
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.odcs_util import ODCSClient
class ModuleInfo(object):
def __init__(self, name, stream, version, mmd, rpms):
self.name = name
self.stream = stream
self.version = version
self.mmd = mmd
self.rpms = rpms
class ComposeInfo(object):
def __init__(self, source_spec, compose_id, base_module, modules, repo_url):
self.source_spec = source_spec
self.compose_id = compose_id
self.base_module = base_module
self.modules = modules
self.repo_url = repo_url
def koji_metadata(self):
sorted_modules = [self.modules[k] for k in sorted(self.modules.keys())]
return {
'source_modules': [self.source_spec],
'modules': ['-'.join((m.name, m.stream, m.version)) for
m in sorted_modules]
}
WORKSPACE_SOURCE_KEY = 'compose_info'
def get_compose_info(workflow):
key = ResolveModuleComposePlugin.key
if key not in workflow.plugin_workspace:
return None
return workflow.plugin_workspace[key].get(WORKSPACE_SOURCE_KEY, None)
def set_compose_info(workflow, source):
key = ResolveModuleComposePlugin.key
workflow.plugin_workspace.setdefault(key, {})
workspace = workflow.plugin_workspace[key]
workspace[WORKSPACE_SOURCE_KEY] = source
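# Hypothetical example of how a later build step might consume this workspace data,
# assuming `workflow` has already been processed by ResolveModuleComposePlugin:
#
#   compose_info = get_compose_info(workflow)
#   if compose_info is not None:
#       repo_url = compose_info.repo_url
#       koji_extra = compose_info.koji_metadata()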
class ResolveModuleComposePlugin(PreBuildPlugin):
key = "resolve_module_compose"
is_allowed_to_fail = False
def __init__(self, tasker, workflow,
module_name, module_stream, module_version=None,
compose_id=None,
odcs_url=None, odcs_insecure=False,
odcs_openidc_secret_path=None,
pdc_url=None, pdc_insecure=False):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param module_name: Module name to look up in PDC
:param module_stream: Module stream to look up in PDC
:param module_version: Module version to look up in PDC (optional)
:param compose_id: ID of compose in ODCS (optional - will only be set for workers)
:param odcs_url: URL of ODCS (On Demand Compose Service)
:param odcs_insecure: If True, don't check SSL certificates for `odcs_url`
:param odcs_openidc_secret_path: directory to look in for a `token` file (optional)
:param pdc_url: URL of PDC (Product Definition center))
:param pdc_insecure: If True, don't check SSL certificates for `pdc_url`
"""
# call parent constructor
super(ResolveModuleComposePlugin, self).__init__(tasker, workflow)
if not pdc_url:
raise RuntimeError("pdc_url is required")
if not odcs_url:
raise RuntimeError("odcs_url is required")
self.module_name = module_name
self.module_stream = module_stream
if module_version is not None and re.match(r'^\d{14}$', module_version) is None:
raise RuntimeError("module_version should be 14 digits")
self.module_version = module_version
self.compose_id = compose_id
self.odcs_url = odcs_url
self.odcs_insecure = odcs_insecure
self.odcs_openidc_secret_path = odcs_openidc_secret_path
self.pdc_url = pdc_url
self.pdc_insecure = pdc_insecure
def _resolve_compose(self):
if self.odcs_openidc_secret_path:
token_path = os.path.join(self.odcs_openidc_secret_path, 'token')
with open(token_path, "r") as f:
odcs_token = f.read().strip()
else:
odcs_token = None
odcs_client = ODCSClient(self.odcs_url, insecure=self.odcs_insecure, token=odcs_token)
# The effect of develop=True is that requests to the PDC are made without authentication;
        # since our interaction with the PDC is read-only, this is fine for our needs and
# makes things simpler.
pdc_client = PDCClient(server=self.pdc_url, ssl_verify=not self.pdc_insecure, develop=True)
fmt = '{n}-{s}' if self.module_version is None else '{n}-{s}-{v}'
source_spec = fmt.format(n=self.module_name, s=self.module_stream, v=self.module_version)
if self.compose_id is None:
self.compose_id = odcs_client.start_compose(source_type='module',
source=source_spec)['id']
compose_info = odcs_client.wait_for_compose(self.compose_id)
if compose_info['state_name'] != "done":
raise RuntimeError("Compose cannot be retrieved, state='%s'" %
compose_info['state_name'])
compose_source = compose_info['source']
self.log.info("Resolved list of modules: %s", compose_source)
resolved_modules = {}
for module_spec in compose_source.strip().split():
m = re.match(r'^(.*)-([^-]+)-(\d{14})$', module_spec)
if not m:
raise RuntimeError("Cannot parse resolved module in compose: %s" % module_spec)
module_name = m.group(1)
module_stream = m.group(2)
module_version = m.group(3)
query = {
'variant_id': module_name,
'variant_version': module_stream,
'variant_release': module_version,
'active': True,
}
self.log.info("Looking up module metadata for '%s' in the PDC", module_spec)
retval = pdc_client['unreleasedvariants/'](page_size=-1,
fields=['modulemd', 'rpms'], **query)
# Error handling
if not retval:
raise RuntimeError("Failed to find module in PDC %r" % query)
if len(retval) != 1:
raise RuntimeError("Multiple modules in the PDC matched %r" % query)
mmd = ModuleMetadata()
mmd.loads(retval[0]['modulemd'])
rpms = set(retval[0]['rpms'])
resolved_modules[module_name] = ModuleInfo(module_name, module_stream, module_version,
mmd, rpms)
base_module = resolved_modules[self.module_name]
assert base_module.stream == self.module_stream
if self.module_version is not None:
assert base_module.version == self.module_version
return ComposeInfo(source_spec=source_spec,
compose_id=self.compose_id,
base_module=base_module,
modules=resolved_modules,
repo_url=compose_info['result_repo'] + '/$basearch/os/')
def run(self):
"""
run the plugin
"""
self.log.info("Resolving module compose for name=%s, stream=%s, version=%s",
self.module_name, self.module_stream, self.module_version)
compose_info = self._resolve_compose()
set_compose_info(self.workflow, compose_info)
| bsd-3-clause | 9,058,405,850,769,840,000 | 37.204762 | 99 | 0.60177 | false |
JonLerida/X-Serv-Practica-Aparcamientos | project/aparcamientos/migrations/0002_auto_20170524_1849.py | 1 | 5694 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('aparcamientos', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Estilo',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('size', models.IntegerField(default='80')),
('color', models.CharField(default='#D8FFD1', max_length=20)),
('usuario', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Guardado',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('fecha', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Pagina',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('nombre', models.CharField(default='', max_length=200)),
('enlace', models.CharField(default='', max_length=200)),
('usuario', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='Usuario',
),
migrations.RemoveField(
model_name='aparcamiento',
name='datos',
),
migrations.AddField(
model_name='aparcamiento',
name='codigo_postal',
field=models.CharField(blank=True, default='', max_length=10),
),
migrations.AddField(
model_name='aparcamiento',
name='email',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AddField(
model_name='aparcamiento',
name='localidad',
field=models.CharField(blank=True, default='', max_length=100),
),
migrations.AddField(
model_name='aparcamiento',
name='number',
field=models.CharField(blank=True, default='', max_length=10),
),
migrations.AddField(
model_name='aparcamiento',
name='provincia',
field=models.CharField(blank=True, default='', max_length=30),
),
migrations.AddField(
model_name='aparcamiento',
name='puntuacion',
field=models.IntegerField(blank=True, default='0'),
),
migrations.AddField(
model_name='aparcamiento',
name='telefono',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AddField(
model_name='aparcamiento',
name='url',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AddField(
model_name='aparcamiento',
name='via',
field=models.CharField(blank=True, default='', max_length=100),
),
migrations.AddField(
model_name='comentario',
name='aparcamiento',
field=models.ForeignKey(null=True, to='aparcamientos.Aparcamiento'),
),
migrations.AddField(
model_name='comentario',
name='usuario',
field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='aparcamiento',
name='accesible',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='aparcamiento',
name='barrio',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AlterField(
model_name='aparcamiento',
name='descripcion',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='aparcamiento',
name='distrito',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AlterField(
model_name='aparcamiento',
name='latitud',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AlterField(
model_name='aparcamiento',
name='longitud',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AlterField(
model_name='aparcamiento',
name='nombre',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AlterField(
model_name='comentario',
name='fecha',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='comentario',
name='texto',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='guardado',
name='aparcamiento',
field=models.ForeignKey(to='aparcamientos.Aparcamiento'),
),
migrations.AddField(
model_name='guardado',
name='usuario',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
| apache-2.0 | -7,302,533,556,450,678,000 | 34.811321 | 114 | 0.544433 | false |
bwmichael/jccc-cis142-python | chapter4/project1-yahtzee.py | 1 | 3244 | ##
# @author Brandon Michael
# This program simulates a simplified game of Yahtzee where a player
# plays against the computer.
# Allow your program to use the randint function
import random
# Create and initialize pre-loop "constants"
TWO_OF_KIND_POINTS = 25
YAHTZEE_POINTS = 50
# Create any other pre-loop variables you may need
userInput = "y"
# Running Total Values
userTotal = 0
computerTotal = 0
##
# Display the dice rolls based on the rules of Yahtzee with 3 rolls of a die
def displaydicerolls(roll_1, roll_2, roll_3):
if roll_1 == roll_2 and roll_1 == roll_3:
return "Yahtzee! " + "(+" + str(YAHTZEE_POINTS) + ")"
elif roll_1 == roll_2 or roll_1 == roll_3 or roll_2 == roll_3:
return "Two of a Kind! " + "(+" + str(TWO_OF_KIND_POINTS) + ")"
else:
result = roll_1 + roll_2 + roll_3
return "Chance! " + "(+" + str(result) + ")"
##
# Calculate and return the points awarded for 3 rolls of a die
# @return points awarded for the 3 rolls
def calculatedicerolls(roll_1, roll_2, roll_3):
if roll_1 == roll_2 and roll_1 == roll_3:
return YAHTZEE_POINTS
elif roll_1 == roll_2 or roll_1 == roll_3 or roll_2 == roll_3:
return TWO_OF_KIND_POINTS
else:
result = roll_1 + roll_2 + roll_3
return result
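# Worked examples of the scoring above (hypothetical rolls):
#   calculatedicerolls(4, 4, 4) -> 50  (Yahtzee)
#   calculatedicerolls(2, 5, 2) -> 25  (Two of a Kind)
#   calculatedicerolls(1, 4, 6) -> 11  (Chance: 1 + 4 + 6)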
# Continue to roll dice while the user enters an upper
# case or lower case Y.
while userInput == "y" or userInput == "Y":
# For the player and computer, roll the three dice and display the dice
# values. You will need to remember each die value.
# Player Values
userRoll1 = random.randint(1, 6)
userRoll2 = random.randint(1, 6)
userRoll3 = random.randint(1, 6)
# Computer Values
computerRoll1 = random.randint(1, 6)
computerRoll2 = random.randint(1, 6)
computerRoll3 = random.randint(1, 6)
# If the values rolled were all the same, display "Yahtzee!" and
# and the number of points for a yahtzee are earned for the player
# else if two values rolled were the same, display "Two of a Kind!" and
# the number of points for two of a kind are earned for the player
# else display chance and the sum of all three dice are earned for
# the player and computer
print("Player rolls: " +
str(userRoll1) + ", " +
str(userRoll2) + ", " +
str(userRoll3) + "\n" +
displaydicerolls(userRoll1, userRoll2, userRoll3))
print("Computer rolls: " +
str(computerRoll1) + ", " +
str(computerRoll2) + ", " +
str(computerRoll3) + "\n" +
displaydicerolls(computerRoll1, computerRoll2, computerRoll3))
# If you haven't already done so, tack the points earned onto
# a running total for the player and computer
userTotal = userTotal + calculatedicerolls(userRoll1, userRoll2, userRoll3)
computerTotal = computerTotal + calculatedicerolls(
computerRoll1, computerRoll2, computerRoll3)
# Show the current totals
print("=============================")
print("Player total points: " + str(userTotal))
print("Computer total points: " + str(computerTotal))
print("=============================")
# Prompt whether to roll again
userInput = input("Roll again (Y or N)? ")
| apache-2.0 | -5,997,405,287,427,632,000 | 33.88172 | 79 | 0.637793 | false |
florian-f/sklearn | sklearn/tree/tree.py | 1 | 32488 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Code is originally adapted from MILK: Machine Learning Toolkit
# Copyright (C) 2008-2011, Luis Pedro Coelho <[email protected]>
# License: MIT. See COPYING.MIT file in the milk distribution
# Authors: Brian Holt, Peter Prettenhofer, Satrajit Ghosh, Gilles Louppe,
# Noel Dawe
# License: BSD3
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.selector_mixin import SelectorMixin
from ..utils import array2d, check_random_state
from ..utils.validation import check_arrays
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CLASSIFICATION = {
"gini": _tree.Gini,
"entropy": _tree.Entropy,
}
REGRESSION = {
"mse": _tree.MSE,
}
def export_graphviz(decision_tree, out_file=None, feature_names=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to graphviz.
out : file object or string, optional (default=None)
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
Returns
-------
out_file : file object
The file object to which the tree was exported. The user is
expected to `close()` this object when done with it.
Examples
--------
>>> import os
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> import tempfile
>>> export_file = tree.export_graphviz(clf,
... out_file='test_export_graphvix.dot')
>>> export_file.close()
>>> os.unlink(export_file.name)
"""
def node_to_str(tree, node_id):
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "error = %.4f\\nsamples = %s\\nvalue = %s" \
% (tree.init_error[node_id],
tree.n_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\nerror = %s\\nsamples = %s\\nvalue = %s" \
% (feature,
tree.threshold[node_id],
tree.init_error[node_id],
tree.n_samples[node_id],
value)
def recurse(tree, node_id, parent=None):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF: # and right_child != _tree.TREE_LEAF
recurse(tree, left_child, node_id)
recurse(tree, right_child, node_id)
if out_file is None:
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0)
else:
recurse(decision_tree.tree_, 0)
out_file.write("}")
return out_file
class BaseDecisionTree(BaseEstimator, SelectorMixin):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
criterion,
max_depth,
min_samples_split,
min_samples_leaf,
min_density,
max_features,
compute_importances,
random_state):
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
if compute_importances:
warn("Setting compute_importances=True is no longer "
"required. Variable importances are now computed on the fly "
"when accessing the feature_importances_ attribute. This "
"parameter will be removed in 0.15.", DeprecationWarning)
self.compute_importances = compute_importances
self.random_state = random_state
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.find_split_ = _tree.TREE_SPLIT_BEST
self.tree_ = None
def fit(self, X, y,
sample_mask=None, X_argsorted=None,
check_input=True, sample_weight=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. Use ``dtype=np.float32``
and ``order='F'`` for maximum efficiency.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
Use ``dtype=np.float64`` and ``order='C'`` for maximum
efficiency.
sample_mask : array-like, shape = [n_samples], dtype = bool or None
A bit mask that encodes the rows of ``X`` that should be
used to build the decision tree. It can be used for bagging
without the need to create of copy of ``X``.
If None a mask will be created that includes all samples.
X_argsorted : array-like, shape = [n_samples, n_features] or None
Each column of ``X_argsorted`` holds the row indices of ``X``
sorted according to the value of the corresponding feature
in ascending order.
I.e. ``X[X_argsorted[i, k], k] <= X[X_argsorted[j, k], k]``
for each j > i.
If None, ``X_argsorted`` is computed internally.
The argument is supported to enable multiple decision trees
to share the data structure and to avoid re-computation in
tree ensembles. For maximum efficiency use dtype np.int32.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
if check_input:
X, y = check_arrays(X, y)
random_state = check_random_state(self.random_state)
# Convert data
if (getattr(X, "dtype", None) != DTYPE or
X.ndim != 2 or
not X.flags.fortran):
X = array2d(X, dtype=DTYPE, order="F")
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
unique = np.unique(y[:, k])
self.classes_.append(unique)
self.n_classes_.append(unique.shape[0])
y[:, k] = np.searchsorted(unique, y[:, k])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if is_classification:
criterion = CLASSIFICATION[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = REGRESSION[self.criterion](self.n_outputs_)
# Check parameters
max_depth = np.inf if self.max_depth is None else self.max_depth
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features_)
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if self.min_density < 0.0 or self.min_density > 1.0:
raise ValueError("min_density must be in [0, 1]")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if sample_mask is not None:
sample_mask = np.asarray(sample_mask, dtype=np.bool)
if sample_mask.shape[0] != n_samples:
raise ValueError("Length of sample_mask=%d does not match "
"number of samples=%d"
% (sample_mask.shape[0], n_samples))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if X_argsorted is not None:
X_argsorted = np.asarray(X_argsorted, dtype=np.int32,
order='F')
if X_argsorted.shape != X.shape:
raise ValueError("Shape of X_argsorted does not match "
"the shape of X")
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
self.tree_ = _tree.Tree(self.n_features_, self.n_classes_,
self.n_outputs_, criterion, max_depth,
min_samples_split, self.min_samples_leaf,
self.min_density, max_features,
self.find_split_, random_state)
self.tree_.build(X, y,
sample_weight=sample_weight,
sample_mask=sample_mask,
X_argsorted=X_argsorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE, order="F")
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba[:, 0], axis=1),
axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance [4]_.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
`feature_importances_` : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features=None,
compute_importances=False,
random_state=None):
super(DecisionTreeClassifier, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
min_density,
max_features,
compute_importances,
random_state)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE, order="F")
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, 0, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in xrange(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`feature_importances_` : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized)total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
R2 scores (a.k.a. coefficient of determination) over 10-folds CV:
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features=None,
compute_importances=False,
random_state=None):
super(DecisionTreeRegressor, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
min_density,
max_features,
compute_importances,
random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
compute_importances=False,
random_state=None):
super(ExtraTreeClassifier, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
min_density,
max_features,
compute_importances,
random_state)
self.find_split_ = _tree.TREE_SPLIT_RANDOM
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier : A classifier base on extremely randomized trees
sklearn.ensemble.ExtraTreesClassifier : An ensemble of extra-trees for
classification
sklearn.ensemble.ExtraTreesRegressor : An ensemble of extra-trees for
regression
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
compute_importances=False,
random_state=None):
super(ExtraTreeRegressor, self).__init__(criterion,
max_depth,
min_samples_split,
min_samples_leaf,
min_density,
max_features,
compute_importances,
random_state)
self.find_split_ = _tree.TREE_SPLIT_RANDOM
| bsd-3-clause | 8,197,242,999,076,984,000 | 37.131455 | 79 | 0.553004 | false |
thisismedium/md | docs/examples/fluidprint.py | 1 | 1620 | from __future__ import print_function
import __builtin__, sys, contextlib, StringIO
from md import fluid
__all__ = (
'display', 'read', 'current_input_port', 'current_output_port',
'output_to_string', 'input_from_string',
'output_to_file', 'input_from_file'
)
## Default to None. This let's the cells play more nicely with code
## that changes sys.stdout/sys.stderr directly (like doctest).
## Binding directly to sys.stdout / sys.stderr is more ideal.
CURRENT_OUTPUT_PORT = fluid.cell(None, type=fluid.acquired)
CURRENT_INPUT_PORT = fluid.cell(None, type=fluid.acquired)
def current_output_port():
return CURRENT_OUTPUT_PORT.value or sys.stdout
def current_input_port():
return CURRENT_INPUT_PORT.value or sys.stdin
def display(*args, **kwargs):
kwargs.setdefault('file', current_output_port())
return __builtin__.print(*args, **kwargs)
def read(*args):
return current_input_port().read(*args)
@contextlib.contextmanager
def output_to_string(*args):
with contextlib.closing(StringIO.StringIO(*args)) as port:
with CURRENT_OUTPUT_PORT.let(port):
yield
@contextlib.contextmanager
def input_from_string(*args):
with contextlib.closing(StringIO.StringIO(*args)) as port:
with CURRENT_INPUT_PORT.let(port):
yield
@contextlib.contextmanager
def output_to_file(filename, mode='w'):
with contextlib.closing(open(filename, mode)) as port:
with CURRENT_OUTPUT_PORT.let(port):
yield
@contextlib.contextmanager
def input_from_file(filename, mode='r'):
with contextlib.closing(open(filename, mode)) as port:
with CURRENT_INPUT_PORT.let(port):
yield
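# Hypothetical usage sketch (all names are this module's own):
#
#   with output_to_file('out.txt'):
#       display('hello')         # written to out.txt instead of sys.stdout
#
#   with input_from_string('abcdef'):
#       first_two = read(2)      # 'ab'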
| bsd-2-clause | 3,917,577,163,526,138,000 | 30.153846 | 68 | 0.719753 | false |
Gabvaztor/TFBoost | src/projects/Web_Traffic_Time/TFBooster_backup.py | 1 | 7343 | # -*- coding: utf-8 -*-
"""
Author: @gabvaztor
StartDate: 04/03/2017
This file contains the next information:
- Libraries to import with installation comment and reason.
- Data Mining Algorithm.
- Sets (train,validation and test) information.
- ANN Arquitectures.
- A lot of utils methods which you'll get useful advantage
The code's structure is:
- Imports
- Global Variables
- Interface
- Reading data algorithms
- Data Mining
- Training and test
- Show final conclusions
Style: "Google Python Style Guide"
https://google.github.io/styleguide/pyguide.html
Notes:
* This file uses TensorFlow version >1.0.
"""
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# IMPORTS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
'''LOCAL IMPORTS
* UtilsFunctions is a library that contains a lot of functions which will help us
to code expressively, clearly and efficiently.
* TensorFlowGUI's library contains all GUI's methods. Contains EasyGUI.
Here you can download the library: https://pypi.python.org/pypi/easygui#downloads
Version 0.98.1 was used.
'''
import TFBoost.TFReader as tfr
import TFBoost.TFDataMining as tfd
from TFBoost.TFEncoder import Dictionary
from UsefulTools.UtilsFunctions import *
import TFBoost.TFModels as models
import SettingsObject
''' TensorFlow: https://www.tensorflow.org/
To upgrade TensorFlow to last version:
*CPU: pip3 install --upgrade tensorflow
*GPU: pip3 install --upgrade tensorflow-gpu
'''
import tensorflow as tf
print("TensorFlow: " + tf.__version__)
''' Numpy is an extension to the Python programming language, adding support for large,
multi-dimensional arrays and matrices, along with a large library of high-level
mathematical functions to operate on these arrays.
It is mandatory to install 'Numpy+MKL' before scipy.
Install 'Numpy+MKL' from here: http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
http://www.numpy.org/
https://en.wikipedia.org/wiki/NumPy '''
import numpy as np
'''
# You need to install the 64bit version of Scipy, at least on Windows.
# It is mandatory to install 'Numpy+MKL' before scipy.
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
# We can find scipy at the url: http://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy'''
import scipy.io as sio
''' Matplotlib URL: http://matplotlib.org/users/installing.html
To install: pip3 install matplotlib'''
import matplotlib.pyplot as plt
''' TFLearn library. License MIT.
Git Clone : https://github.com/tflearn/tflearn.git
To install: pip install tflearn'''
import tflearn
'''
Sklearn(scikit-learn): Simple and efficient tools for data mining and data analysis
To install: pip3 install -U scikit-learn
'''
from sklearn.model_selection import train_test_split
"""
To install pandas: pip3 install pandas
"""
import pandas as pd
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- GLOBAL VARIABLES ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- USER INTERFACE ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""Creating user interface
#properties = eg.EasyGui()
#uf.pt("Typos GUI",properties.types)
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- READING DATA ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Setting object
setting_object_web_traffic = SettingsObject.Settings(Dictionary.string_settings_web_traffic)
# Option problem
option_problem_web_traffic = Dictionary.string_option_web_traffic_problem
# Number of classes
number_of_classes_web_traffic = 1
# Path Train: Must be a list
path_train_validation_test_sets_web_traffic = [setting_object_web_traffic.train_path,
setting_object_web_traffic.test_path,
setting_object_web_traffic.model_path,
setting_object_web_traffic.submission_path]
# Labels_set
labels_set_web_traffic = None
# Sets_Percentages
percentages_sets_web_traffic = [0.7,0.2,0.1]
# Is unique
is_an_unique_csv_web_traffic = False  # If this variable is true, then only one CSV file will be passed and it will be treated as the
# trainSet, validationSet (if necessary) and testSet
known_data_type_web_traffic = None  # Contains the type of data if the data file contains a unique type of data. Example: Number
tf_reader_web_traffic = tfr.Reader(delimiter=Dictionary.string_char_comma,
paths_to_read=path_train_validation_test_sets_web_traffic,
number_of_classes=number_of_classes_web_traffic,
labels_set=labels_set_web_traffic,
is_unique_file=is_an_unique_csv_web_traffic,
known_data_type=known_data_type_web_traffic,
percentages_sets=percentages_sets_web_traffic,
type_problem=option_problem_web_traffic) # Reader Object with all information
validation_set_web_traffic = tf_reader_web_traffic.validation_set # Test Set
train_set_web_traffic = tf_reader_web_traffic.train_set # Train Set
del tf_reader_web_traffic
names_of_data = ["input_data", "validation_data", "inputs_labels", "validation_labels"]
names_of_data_updated = ["input_data_updated", "validation_data_updated", "inputs_labels", "validation_labels"]
names_dictionaries = ["input_validation_dictionary"]
# Load input, validation and labels from updated arrays where inputs are [number, float] where number is
# the page id and float is the visits' number
input_data, validation, input_labels, validation_labels = \
load_numpy_arrays_generic(path_to_load=setting_object_web_traffic.accuracies_losses_path,
names=names_of_data_updated)
models_zillow_price = models.TFModels(input_data=input_data,
input_labels=input_labels,
validation=validation,
validation_labels=validation_labels,
number_of_classes=number_of_classes_web_traffic,
setting_object=setting_object_web_traffic,
option_problem=option_problem_web_traffic,
load_model_configuration=False)
#with tf.device('/gpu:0'):
models_zillow_price.rnn_lstm_web_traffic_time()
| apache-2.0 | -1,287,549,112,972,866,000 | 41.201149 | 132 | 0.559444 | false |
nwiizo/workspace_2017 | environmental/mininet/examples/test_natnet.py | 1 | 1827 | #!/usr/bin/env python
"""
Test for natnet.py
"""
import unittest
import pexpect
from mininet.util import quietRun
class testNATNet( unittest.TestCase ):
prompt = 'mininet>'
def setUp( self ):
self.net = pexpect.spawn( 'python -m mininet.examples.natnet' )
self.net.expect( self.prompt )
def testPublicPing( self ):
"Attempt to ping the public server (h0) from h1 and h2"
self.net.sendline( 'h1 ping -c 1 h0' )
self.net.expect ( '(\d+)% packet loss' )
percent = int( self.net.match.group( 1 ) ) if self.net.match else -1
self.assertEqual( percent, 0 )
self.net.expect( self.prompt )
self.net.sendline( 'h2 ping -c 1 h0' )
self.net.expect ( '(\d+)% packet loss' )
percent = int( self.net.match.group( 1 ) ) if self.net.match else -1
self.assertEqual( percent, 0 )
self.net.expect( self.prompt )
def testPrivatePing( self ):
"Attempt to ping h1 and h2 from public server"
self.net.sendline( 'h0 ping -c 1 -t 1 h1' )
result = self.net.expect ( [ 'unreachable', 'loss' ] )
self.assertEqual( result, 0 )
self.net.expect( self.prompt )
self.net.sendline( 'h0 ping -c 1 -t 1 h2' )
result = self.net.expect ( [ 'unreachable', 'loss' ] )
self.assertEqual( result, 0 )
self.net.expect( self.prompt )
def testPrivateToPrivatePing( self ):
"Attempt to ping from NAT'ed host h1 to NAT'ed host h2"
self.net.sendline( 'h1 ping -c 1 -t 1 h2' )
result = self.net.expect ( [ '[Uu]nreachable', 'loss' ] )
self.assertEqual( result, 0 )
self.net.expect( self.prompt )
def tearDown( self ):
self.net.sendline( 'exit' )
self.net.wait()
if __name__ == '__main__':
unittest.main()
| mit | 99,539,991,926,040,020 | 31.052632 | 76 | 0.587849 | false |
edgedb/edgedb | tests/common/test_supervisor.py | 1 | 6926 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from edb.common import supervisor
from edb.common import taskgroup
from edb.testbase import server as tb
class TestSupervisor(tb.TestCase):
async def test_supervisor_01(self):
async def foo1():
await asyncio.sleep(0.1)
return 42
async def foo2():
await asyncio.sleep(0.2)
return 11
g = await supervisor.Supervisor.create()
t1 = g.create_task(foo1())
t2 = g.create_task(foo2())
await g.wait()
self.assertEqual(t1.result(), 42)
self.assertEqual(t2.result(), 11)
async def test_supervisor_02(self):
async def foo1():
await asyncio.sleep(0.1)
return 42
async def foo2():
await asyncio.sleep(0.2)
return 11
g = await supervisor.Supervisor.create()
t1 = g.create_task(foo1())
await asyncio.sleep(0.15)
t2 = g.create_task(foo2())
await g.wait()
self.assertEqual(t1.result(), 42)
self.assertEqual(t2.result(), 11)
async def test_supervisor_03(self):
async def foo1():
await asyncio.sleep(1)
return 42
async def foo2():
await asyncio.sleep(0.2)
return 11
g = await supervisor.Supervisor.create()
t1 = g.create_task(foo1())
await asyncio.sleep(0.15)
# cancel t1 explicitly, i.e. everything should continue
# working as expected.
t1.cancel()
t2 = g.create_task(foo2())
await g.wait()
self.assertTrue(t1.cancelled())
self.assertEqual(t2.result(), 11)
async def test_supervisor_04(self):
NUM = 0
t2_cancel = False
t2 = None
async def foo1():
await asyncio.sleep(0.1)
1 / 0
async def foo2():
nonlocal NUM, t2_cancel
try:
await asyncio.sleep(1)
except asyncio.CancelledError:
t2_cancel = True
raise
NUM += 1
async def runner():
nonlocal NUM, t2
g = await supervisor.Supervisor.create()
g.create_task(foo1())
t2 = g.create_task(foo2())
await g.wait()
NUM += 10
with self.assertRaisesRegex(taskgroup.TaskGroupError,
r'1 sub errors: \(ZeroDivisionError\)'):
await self.loop.create_task(runner())
self.assertEqual(NUM, 0)
self.assertTrue(t2_cancel)
self.assertTrue(t2.cancelled())
async def test_supervisor_05(self):
NUM = 0
async def foo():
nonlocal NUM
try:
await asyncio.sleep(5)
except asyncio.CancelledError:
NUM += 1
raise
async def runner():
g = await supervisor.Supervisor.create()
for _ in range(5):
g.create_task(foo())
await g.wait()
r = self.loop.create_task(runner())
await asyncio.sleep(0.1)
self.assertFalse(r.done())
r.cancel()
with self.assertRaises(asyncio.CancelledError):
await r
self.assertEqual(NUM, 5)
async def test_supervisor_06(self):
async def foo1():
await asyncio.sleep(1)
return 42
async def foo2():
await asyncio.sleep(2)
return 11
async def runner():
g = await supervisor.Supervisor.create()
g.create_task(foo1())
g.create_task(foo2())
await g.wait()
r = self.loop.create_task(runner())
await asyncio.sleep(0.05)
r.cancel()
with self.assertRaises(asyncio.CancelledError):
await r
async def test_supervisor_07(self):
NUM = 0
async def foo1():
nonlocal NUM
NUM += 1
try:
await asyncio.sleep(1)
except asyncio.CancelledError:
NUM += 10
await asyncio.sleep(10)
NUM += 1000
raise
return 42
async def foo2():
nonlocal NUM
NUM += 1
await asyncio.sleep(2)
NUM += 1000
return 11
async def runner():
g = await supervisor.Supervisor.create()
g.create_task(foo1())
g.create_task(foo2())
await asyncio.sleep(0.1)
await g.cancel()
r = self.loop.create_task(runner())
await asyncio.sleep(0.5)
r.cancel()
with self.assertRaises(asyncio.CancelledError):
await r
self.assertEqual(NUM, 12)
async def test_supervisor_08(self):
NUM = 0
async def foo1():
nonlocal NUM
NUM += 1
await asyncio.sleep(1)
NUM += 1000
return 42
async def foo2():
nonlocal NUM
NUM += 1
await asyncio.sleep(2)
NUM += 1000
return 11
async def runner():
g = await supervisor.Supervisor.create()
g.create_task(foo1())
g.create_task(foo2())
await asyncio.sleep(0.1)
await g.cancel()
await runner()
self.assertEqual(NUM, 2)
async def test_supervisor_09(self):
NUM = 0
async def foo1():
nonlocal NUM
NUM += 1
try:
await asyncio.sleep(1)
except asyncio.CancelledError:
await asyncio.sleep(0.2)
NUM += 10
raise
NUM += 1000
return 42
async def foo2():
nonlocal NUM
NUM += 1
await asyncio.sleep(2)
NUM += 1000
return 11
async def runner():
g = await supervisor.Supervisor.create()
g.create_task(foo1())
g.create_task(foo2())
await asyncio.sleep(0.1)
await g.cancel()
await runner()
self.assertEqual(NUM, 12)
| apache-2.0 | -252,408,295,634,715,870 | 23.048611 | 76 | 0.520214 | false |
mortardata/mortar-examples | luigiscripts/word-luigi-solution.py | 1 | 3104 | import luigi
from luigi import configuration
from luigi.s3 import S3Target, S3PathTask
from mortar.luigi import mortartask
"""
To run, replace <your-handle-here> with something unique to you (for instance, first initial plus last name):
mortar luigi luigiscripts/word-luigi-solution.py \
--output-path s3://mortar-example-output-data/<your-handle-here>/q-words
"""
class WordRank(mortartask.MortarProjectPigscriptTask):
# Cluster size to use for running this Pig script
# Defaults to Local Mode (no cluster),
# but larger cluster values can be passed in at runtime
cluster_size = luigi.IntParameter(default=0)
# S3 path to the script's output directory (will be passed in as a parameter at runtime)
output_path = luigi.Parameter()
def token_path(self):
"""
S3 path for Luigi to store tokens indicating
the Task has completed.
"""
return self.output_path
def script(self):
"""
Name of the Pigscript to run
(omit the .pig from the file name).
"""
return 'google_books_words'
def parameters(self):
"""
Any parameters that you want to pass to your Pig script can go here.
In this case we want the Pig job to write output to the same path
Luigi is using, which will make it easier for subsequent Luigi tasks to find
the output from the Pig job.
"""
return {
'OUTPUT_PATH': self.output_path
}
def requires(self):
"""
Dependency of this Task -- in this case Luigi uses
S3PathTask to check for the existence of input data
at a certain S3 path before running this Task.
"""
return [S3PathTask('s3://mortar-example-data/ngrams/books/20120701/eng-all/1gram/googlebooks-eng-all-1gram-20120701-q.gz')]
def script_output(self):
"""
S3 target where the Pig job's output will be stored.
This directory will be cleared of partial data
if the script fails.
"""
return [S3Target(self.output_path)]
class SanityTest(luigi.Task):
output_path = luigi.Parameter()
def requires(self):
"""
This Task takes the output of the WordRank Task as its input,
so we list WordRank as a dependency.
"""
return [WordRank(output_path=self.output_path)]
def output(self):
"""
We want this Task to write its tokens to a unique location,
defined by the name of the class.
"""
return [S3Target('%s/%s' % (self.output_path, self.__class__.__name__))]
def run(self):
"""
This Python code checks that each word in the pig script
        output file begins with 'q' and raises an exception if not.
"""
file = S3Target('%s/%s/part-r-00000' % (self.output_path, 'dictionary'))
for line in file.open('r'):
if line[0] != 'q':
raise Exception("Word: %s didn't start with q" % word)
if __name__ == "__main__":
luigi.run(main_task_cls=SanityTest)
| apache-2.0 | 3,113,173,805,581,149,700 | 31.333333 | 131 | 0.619523 | false |
cjlee112/socraticqs2 | mysite/ctms/middleware.py | 1 | 10451 | import re
from collections import OrderedDict
from django.urls import reverse, resolve, NoReverseMatch
from django.conf import settings
from django.http import HttpResponseRedirect
from ct.models import Course, CourseUnit, UnitLesson, Response
from ctms.urls import urlpatterns as ctms_urls
from .views import CourseCoursletUnitMixin
# course, courslet, unit
MODEL_ORDER_TUPLE = (('course_pk', Course),
('courselet_pk', CourseUnit),
('unit_pk', UnitLesson),
('response_pk', Response))
MODELS_ORDERED_DICT = OrderedDict(MODEL_ORDER_TUPLE)
MODEL_NAMES_MAPPING = {
Course: 'course',
CourseUnit: 'courslet',
UnitLesson: 'unit',
Response: 'response'
}
NAME_MODEL_MAPPING = {
'Course': Course,
'Response': Response,
'UnitLesson': UnitLesson,
'CourseUnit': CourseUnit
}
ALLOWED_MODEL_FILTERS_MAPPING = {
Course: [''],
CourseUnit: ['']
}
def get_model_filter(model, kwargs):
"""
Add filter params to appropriate model if any
Arguments:
model (object): Model object
kwargs (dict): key, value to be added to the model filtering
"""
filter_dict = {}
for field in ALLOWED_MODEL_FILTERS_MAPPING.get(model, []):
if kwargs.get(field):
filter_dict[field] = kwargs.get(field)
return filter_dict
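# Illustrative sketch (hypothetical kwargs, not taken from the original code):
# with the mappings above, view kwargs like {'course_pk': 3, 'pk': 7} resolve
# via SideBarUtils._get_model_ids() below to {Course: 3, CourseUnit: 7}.
# Because ALLOWED_MODEL_FILTERS_MAPPING only whitelists the placeholder ''
# field, get_model_filter(Course, kwargs) effectively returns {} unless the
# kwargs dict contains that empty-string key.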
class SideBarUtils(object):
'''
Utils class.
'''
def __init__(self, get_response):
# before using this mixin we have to attach request to mixin's instance
self.course_mixin = CourseCoursletUnitMixin()
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
def _get_model_ids(self, kwargs):
'''
        :param kwargs: view kwargs such as course_pk, courselet_pk, unit_pk,
            response_pk and/or pk.
        :return: dict mapping model classes to the object ids found in kwargs.
'''
model_ids = dict()
last_pk = None
_, default_model = list(MODELS_ORDERED_DICT.items())[0]
for kw, model in list(MODELS_ORDERED_DICT.items()):
if kw in kwargs:
last_pk = kw
model_ids[MODELS_ORDERED_DICT[kw]] = kwargs[kw]
if 'pk' in kwargs:
if last_pk is None:
model_ids[default_model] = kwargs['pk']
else:
model_ids[
list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1][1]
] = kwargs['pk']
return model_ids
def _get_objects(self, model_ids):
'''
Yields pairs of name, object.
Name is translated according with mapping.
:param model_ids: dict returned from _get_model_ids
:return:
'''
for model, pk in list(model_ids.items()):
filter_data = {'id': pk}
if self.request.user.is_authenticated:
filter_data.update(get_model_filter(model, {}))
yield (MODEL_NAMES_MAPPING[model], model.objects.filter(**filter_data).first())
def _reverse(self, name, kwargs=None):
namespace = getattr(settings, 'CTMS_URL_NAMESPACE', 'ctms')
return reverse('{}:{}'.format(namespace, name), kwargs=kwargs)
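    # Illustrative example (names taken from the url patterns used below):
    # self._reverse('course_view', {'pk': 1}) resolves to
    # reverse('ctms:course_view', kwargs={'pk': 1}) under the default namespace.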
def _get_url_kwargs(self, url):
        kwarg_re = r'\(\?P<(\w+)>'
kwargs = re.findall(kwarg_re, url.regex.pattern)
return kwargs
def _get_urls(self, request):
"""
        return: dict with urls like
        {
            'all_urls': {
                'url_name': '/some/url'
            },
            'course_urls': {
                '1': {
                    'url.name': '/some/url'
                }
            },
            'courslet_urls': {
                '1': {
                    'url.name': '/some/url'
                }
            },
            'unit_urls': {
                '1': {
                    'url.name': '/some/url'
                }
            },
            'response_urls': {
                '1': {
                    'url.name': '/some/url'
                }
            }
        }
"""
all_urls = {}
def add_url(to, url_name, obj_id, url):
all_urls.setdefault(
to, {}
).setdefault(
obj_id, {}
)[url_name] = url
for url in ctms_urls:
url_kw = self._get_url_kwargs(url)
try:
kwargs = self._translate_kwargs(request, url_kw)
_url = self._reverse(url.name, kwargs)
# course_pk = kwargs.get('course_pk')
# courselet_pk = kwargs.get('courselet_pk')
if 'course' in url.name:
pk = kwargs.get('pk')
add_url('course_urls', url.name, pk, _url)
elif 'courslet' in url.name:
pk = kwargs.get('pk')
add_url('courslet_urls', url.name, pk, _url)
elif 'unit' in url.name:
pk = kwargs.get('pk')
add_url('unit_urls', url.name, pk, _url)
elif 'response' in url.name:
pk = kwargs.get('pk')
add_url('unit_urls', url.name, pk, _url)
all_urls.setdefault('all_urls', {})[url.name] = _url
except NoReverseMatch:
continue
return all_urls
def _get_model_from_request(self, request, name):
return getattr(getattr(request, MODEL_NAMES_MAPPING[name], None), 'id', None)
def _translate_kwargs(self, request, kwargs_list):
last_pk = None
_, default_model = list(MODELS_ORDERED_DICT.items())[0]
result_kwargs = {}
for kw, model in list(MODELS_ORDERED_DICT.items()):
if kw in kwargs_list:
result_kwargs[kw] = self._get_model_from_request(request, model)
last_pk = kw
if 'pk' in kwargs_list:
if last_pk is None:
result_kwargs['pk'] = self._get_model_from_request(request, default_model)
else:
next_model = list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1]
result_kwargs['pk'] = self._get_model_from_request(request, next_model[1])
return result_kwargs
class SideBarMiddleware(SideBarUtils):
def process_view(self, request, view_func, view_args, view_kwargs):
# urls = self._get_urls(request)
self.request = request
current_url = resolve(request.path_info).url_name
if 'bp' not in request.path and request.path.startswith('/ctms/') and reverse('accounts:profile_update') != request.path and \
request.path != reverse('ctms:email_sent') and '/ctms/invites/' not in request.path:
# if we are going to /ctms/ namespace except of /ctms/email_sent/
if (request.user.is_authenticated and (not getattr(request.user, 'instructor', None)
or not request.user.instructor.institution or not request.user.instructor.what_do_you_teach)):
# if we don't have instructor or instructor.institution
return HttpResponseRedirect('{}?next={}'.format(reverse('accounts:profile_update'), request.path))
if 'ctms' in request.path and current_url != 'my_courses':
model_ids = self._get_model_ids(view_kwargs)
objects = self._get_objects(model_ids)
# attach recieved objects to request object
for name, obj in objects:
setattr(request, name, obj)
# attach object id's to session
old_obj_ids = request.session.get('sidebar_object_ids', {})
obj_ids = {}
for name, cls in list(NAME_MODEL_MAPPING.items()):
old_id = old_obj_ids.get(cls.__name__)
new_id = model_ids.get(cls)
if old_id and new_id:
if new_id != old_id:
obj_ids[cls.__name__] = new_id
else:
obj_ids[cls.__name__] = new_id
elif old_id and not new_id:
obj_ids[cls.__name__] = old_id
elif not old_id and new_id:
obj_ids[cls.__name__] = new_id
request.session['sidebar_object_ids'] = obj_ids
return None
def process_template_response(self, request, response):
# add request to mixin
current_url = resolve(request.path_info).url_name
self.course_mixin.request = request
sidebar_context = {}
my_courses = self.course_mixin.get_my_courses() if request.user.is_authenticated else Course.objects.none()
sidebar_context['user_courses'] = my_courses
if 'ctms' in request.path and not request.session.get('sidebar_object_ids', {}):
for model, name in list(MODEL_NAMES_MAPPING.items()):
sidebar_context[name] = getattr(request, name, None)
# urls = self._get_urls(request)
# sidebar_context['urls'] = urls
elif request.session.get('sidebar_object_ids'):
objects = dict(self._get_objects({
model: request.session.get('sidebar_object_ids', {}).get(name)
for name, model in list(NAME_MODEL_MAPPING.items())
}))
sidebar_context.update(objects)
if sidebar_context.get('course'):
courslets = self.course_mixin.get_courselets_by_course(sidebar_context['course'])
sidebar_context['course_courslets'] = courslets
if sidebar_context.get('courslet'):
sidebar_context['courslet_units'] = self.course_mixin.get_units_by_courselet(
sidebar_context['courslet']
)
sidebar_context['current_page_name'] = current_url
sidebar_context['courselets_link_active'] = False
sidebar_context['units_link_active'] = False
if current_url == 'course_view':
sidebar_context['courselets_link_active'] = True
elif current_url == 'courslet_view':
sidebar_context['courselets_link_active'] = True
sidebar_context['units_link_active'] = True
if response.context_data:
response.context_data['sidebar'] = sidebar_context
response.render()
return response
| apache-2.0 | 3,459,832,055,753,479,700 | 36.729242 | 134 | 0.547507 | false |
TechAtNYU/overlord | overlord/reminder.py | 1 | 5326 | import os
import requests
from datetime import datetime
from dateutil.parser import parse
from dateutil import tz
from pytz import timezone
from threading import Thread
from overlord import celery
from utils import Event, Email, headers
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class ReminderEmail(Email):
def _get_time(self):
"""
Changes UTC time fetched from the API to New York Time
"""
date_time = self.event_data[0]['attributes']['startDateTime']
time = parse(date_time).replace(tzinfo=tz.gettz('UTC'))
central_time = time.astimezone(tz.gettz('America/New_York'))
return ":".join(str(central_time.time()).split(":")[0:2])
def _get_emails(self, event_id):
res = requests.get('https://api.tnyu.org/v3/events/' + event_id +
'?include=rsvps', headers=headers, verify=False)
if res.status_code != 200:
return
r = res.json()
self.event_data.append(r['data'])
for post in r['included']:
if post['attributes'].get('contact'):
if post['attributes']['roles']:
self.eboard_members.append(post)
else:
self.attendees.append(post)
def _venue_address(self):
venue_id = self.event_data[0]['relationships'][
'venue']['data']['id']
venue = requests.get(
"https://api.tnyu.org/v3/venues/" + venue_id, headers=headers)
address = venue.json()['data']['attributes']['address']
address_str = "\n".join(address.split(","))
return address_str
def _generate_emails(self, members):
address = self._venue_address()
time = self._get_time()
for i, member in enumerate(members):
msg = MIMEMultipart('alternative')
msg['Subject'] = "Confirmation for Tech@NYU's " + self.event_data[0]['attributes']['title']
msg['From'] = "Tech@NYU Feedback <" + os.environ['TNYU_EMAIL'] +">"
msg['To'] = members[i]['attributes']['contact']['email']
text = ("Hi " + members[i]['attributes']['name'] + "!\n\n" +
"This is your confirmation for the Tech@NYU " +
self.event_data[0]['attributes']['title'] + " tomorrow at " +
time + ". The event will be held at: \n\n" + address +
"\n\nWe look forward to seeing you! Feel free to reach out" +
" to us if you have any other questions. For more updates" +
" feel free to follow us on Twitter or Facebook. \n\n" +
"Thank you")
address_str = ''
for item in address.split('\n'):
address_str += item.strip() + "<br>"
html = (
"<html>" +
"<head></head>" +
"<body>" +
"<p>Hi " + members[i]['attributes']['name'] + "!</p>" +
"<p>This is your confirmation for the Tech@NYU " +
self.event_data[0]['attributes']['title'] + " tomorrow at " +
time + ". The event will be held at:</p>" +
"<p>" + address_str + "</p>" +
"<p>We look forward to seeing you! Feel free to reach out " +
"to us if you have any other questions. For more updates " +
"feel free to follow us on <a href='https://twitter.com/techatnyu'>Twitter</a> or <a href='https://www.facebook.com/TechatNYU/'>Facebook</a>.</p>"+
"<p>Thank you</p>"
"</body>" +
"</html>")
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
msg.attach(part1)
msg.attach(part2)
try:
err = self.server.sendmail(os.environ['TNYU_EMAIL'], members[i][
'attributes']['contact']['email'], msg.as_string())
if err:
print(err)
except UnicodeEncodeError:
continue
def send_emails(self, event_id):
self._get_emails(event_id)
self._generate_emails(self.eboard_members)
self._generate_emails(self.attendees)
def get_resource(sort=None):
root_url = "https://api.tnyu.org/v3/events/"
r = requests.get(root_url + "?sort=-" + sort, headers=headers)
return r.json()
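# For example, get_resource(sort="startDateTime") requests
# https://api.tnyu.org/v3/events/?sort=-startDateTime; the '-' prefix asks the
# API for a descending sort, so events with the most recent start dates come first.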
def get_events_in_future():
resources = get_resource(sort="startDateTime")['data']
events = [Event(x) for x in resources]
# Change UTC to New York Time
today = timezone("America/New_York").localize(datetime.today()).date()
future_events = []
for event in events:
startDateTime = getattr(event, 'startDateTime', None)
if startDateTime:
event_date = parse(event.startDateTime).replace(tzinfo=tz.gettz('UTC')).astimezone(tz.gettz('America/New_York')).date()
# Check if the event is tomorrow
if (event_date - today).days == 1:
future_events.append(event)
return future_events
@celery.task
def send_emails():
emails = ReminderEmail()
events = get_events_in_future()
for event in events:
thr = Thread(target=emails.send_emails, args=[event.id])
thr.start()
return len(events)
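# send_emails() only picks up events starting exactly one day from "today" in
# New York time, so it is presumably scheduled to run once per day (e.g. from
# celery beat); each matching event gets its reminders sent on its own thread.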
| mit | 8,884,969,978,495,147,000 | 36.244755 | 163 | 0.550695 | false |
rhinstaller/blivet-gui | blivetgui/config.py | 1 | 1885 | # -*- coding: utf-8 -*-
# config.py
# Config for blivet-gui
#
# Copyright (C) 2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vojtech Trefny <[email protected]>
#
# ---------------------------------------------------------------------------- #
class BlivetGUIConfig(dict):
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self["default_fstype"] = "ext4"
self["log_dir"] = "/var/log/blivet-gui"
def __getattr__(self, name):
        # hasattr() here would re-enter __getattr__ and recurse; a key check is enough
        if name not in self:
            raise AttributeError("BlivetGUIConfig has no attribute %s" % name)
return self[name]
def __setattr__(self, name, value):
        if name not in self:
            raise AttributeError("BlivetGUIConfig has no attribute %s" % name)
self[name] = value
def __repr__(self):
return "BlivetGUIConfig:\n%s" % str(self)
config = BlivetGUIConfig()
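# Illustrative usage sketch (not part of the original module):
#   from blivetgui.config import config  # assumed import path
#   config.default_fstype                # -> "ext4"
#   config.default_fstype = "xfs"        # allowed: the key already exists
#   config.no_such_option = 1            # raises AttributeError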
| gpl-2.0 | 5,412,518,088,339,522,000 | 38.270833 | 80 | 0.660477 | false |
MadeiraCloud/salt | sources/salt/modules/useradd.py | 1 | 13501 | # -*- coding: utf-8 -*-
'''
Manage users with the useradd command
'''
# Import python libs
import re
try:
import grp
import pwd
except ImportError:
pass
import logging
import copy
# Import salt libs
import salt.utils
from salt.modules import state_std
from salt._compat import string_types
log = logging.getLogger(__name__)
RETCODE_12_ERROR_REGEX = re.compile(
r'userdel(.*)warning(.*)/var/mail(.*)No such file or directory'
)
# Define the module's virtual name
__virtualname__ = 'user'
def __virtual__():
'''
Set the user module if the kernel is Linux or OpenBSD
and remove some of the functionality on OS X
'''
if 'kernel' in __grains__ and \
__grains__['kernel'] in ('Linux', 'OpenBSD', 'NetBSD'):
return __virtualname__
return False
def _get_gecos(name):
'''
Retrieve GECOS field info and return it in dictionary form
'''
gecos_field = pwd.getpwnam(name).pw_gecos.split(',', 3)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
return {'fullname': str(gecos_field[0]),
'roomnumber': str(gecos_field[1]),
'workphone': str(gecos_field[2]),
'homephone': str(gecos_field[3])}
def _build_gecos(gecos_dict):
'''
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''))
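# For example (illustrative values): _build_gecos({'fullname': 'Jane Doe',
# 'roomnumber': '12'}) returns 'Jane Doe,12,,' -- unspecified fields collapse
# to empty strings, which is exactly the shape _get_gecos() parses back out.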
def add(name,
uid=None,
gid=None,
groups=None,
home=None,
shell=None,
unique=True,
system=False,
fullname='',
roomnumber='',
workphone='',
homephone='',
createhome=True, **kwargs):
'''
Add a user to the minion
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
'''
cmd = ['useradd']
if shell:
cmd.extend(['-s', shell])
if uid not in (None, ''):
cmd.extend(['-u', str(uid)])
if gid not in (None, ''):
cmd.extend(['-g', str(gid)])
elif groups is not None and name in groups:
try:
for line in salt.utils.fopen('/etc/login.defs'):
if not 'USERGROUPS_ENAB' in line[:15]:
continue
if 'yes' in line:
cmd.extend([
'-g', str(__salt__['file.group_to_gid'](name))
])
# We found what we wanted, let's break out of the loop
break
except OSError:
log.debug('Error reading /etc/login.defs', exc_info=True)
if createhome:
cmd.append('-m')
elif createhome is False:
cmd.append('-M')
if home is not None:
cmd.extend(['-d', home])
if not unique:
cmd.append('-o')
if system and __grains__['kernel'] != 'NetBSD':
cmd.append('-r')
cmd.append(name)
ret = __salt__['cmd.run_stdall'](' '.join(cmd))
state_std(kwargs, ret)
if ret['retcode'] != 0:
return False
# At this point, the user was successfully created, so return true
# regardless of the outcome of the below functions. If there is a
    # problem with changing any of the user's info below, it will be raised
# in a future highstate call. If anyone has a better idea on how to do
# this, feel free to change it, but I didn't think it was a good idea
# to return False when the user was successfully created since A) the
# user does exist, and B) running useradd again would result in a
# nonzero exit status and be interpreted as a False result.
if groups:
chgroups(name, groups)
if fullname:
chfullname(name, fullname)
if roomnumber:
chroomnumber(name, roomnumber)
if workphone:
chworkphone(name, workphone)
if homephone:
chhomephone(name, homephone)
return True
def delete(name, remove=False, force=False, **kwargs):
'''
Remove a user from the minion
CLI Example:
.. code-block:: bash
salt '*' user.delete name remove=True force=True
'''
cmd = ['userdel']
if remove:
cmd.append('-r')
if force:
cmd.append('-f')
cmd.append(name)
ret = __salt__['cmd.run_stdall'](' '.join(cmd))
state_std(kwargs, ret)
if ret['retcode'] == 0:
# Command executed with no errors
return True
if ret['retcode'] == 12:
# There's a known bug in Debian based distributions, at least, that
# makes the command exit with 12, see:
# https://bugs.launchpad.net/ubuntu/+source/shadow/+bug/1023509
if __grains__['os_family'] not in ('Debian',):
return False
if RETCODE_12_ERROR_REGEX.match(ret['stderr']) is not None:
# We've hit the bug, let's log it and not fail
log.debug(
                'While the userdel exited with code 12, this is a known bug on '
'debian based distributions. See http://goo.gl/HH3FzT'
)
return True
return False
def getent(refresh=False):
'''
Return the list of all info for all users
CLI Example:
.. code-block:: bash
salt '*' user.getent
'''
if 'user.getent' in __context__ and not refresh:
return __context__['user.getent']
ret = []
for data in pwd.getpwall():
ret.append(_format_info(data))
__context__['user.getent'] = ret
return ret
def chuid(name, uid, **kwargs):
'''
Change the uid for a named user
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376
'''
pre_info = info(name)
if uid == pre_info['uid']:
return True
cmd = 'usermod -u {0} {1}'.format(uid, name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['uid'] != pre_info['uid']:
return str(post_info['uid']) == uid
return False
def chgid(name, gid, **kwargs):
'''
Change the default group of the user
CLI Example:
.. code-block:: bash
salt '*' user.chgid foo 4376
'''
pre_info = info(name)
if gid == pre_info['gid']:
return True
cmd = 'usermod -g {0} {1}'.format(gid, name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['gid'] != pre_info['gid']:
return str(post_info['gid']) == gid
return False
def chshell(name, shell, **kwargs):
'''
Change the default shell of the user
CLI Example:
.. code-block:: bash
salt '*' user.chshell foo /bin/zsh
'''
pre_info = info(name)
if shell == pre_info['shell']:
return True
cmd = 'usermod -s {0} {1}'.format(shell, name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['shell'] != pre_info['shell']:
return str(post_info['shell']) == shell
return False
def chhome(name, home, persist=False, **kwargs):
'''
Change the home directory of the user, pass true for persist to copy files
to the new home dir
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /home/users/foo True
'''
pre_info = info(name)
if home == pre_info['home']:
return True
cmd = 'usermod -d {0} '.format(home)
if persist:
cmd += ' -m '
cmd += name
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['home'] != pre_info['home']:
return str(post_info['home']) == home
return False
def chgroups(name, groups, append=False, **kwargs):
'''
Change the groups this user belongs to, add append to append the specified
groups
CLI Example:
.. code-block:: bash
salt '*' user.chgroups foo wheel,root True
'''
if isinstance(groups, string_types):
groups = groups.split(',')
ugrps = set(list_groups(name))
if ugrps == set(groups):
return True
cmd = 'usermod '
if append:
cmd += '-a '
cmd += '-G "{0}" {1}'.format(','.join(groups), name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
return not result['retcode']
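# For example (illustrative call): chgroups('foo', 'wheel,root', append=True)
# assembles and runs:  usermod -a -G "wheel,root" foo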
def chfullname(name, fullname, **kwargs):
'''
Change the user's Full Name
CLI Example:
.. code-block:: bash
salt '*' user.chfullname foo "Foo Bar"
'''
fullname = str(fullname)
pre_info = _get_gecos(name)
if not pre_info:
return False
if fullname == pre_info['fullname']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['fullname'] = fullname
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['fullname'] != pre_info['fullname']:
return str(post_info['fullname']) == fullname
return False
def chroomnumber(name, roomnumber, **kwargs):
'''
Change the user's Room Number
CLI Example:
.. code-block:: bash
salt '*' user.chroomnumber foo 123
'''
roomnumber = str(roomnumber)
pre_info = _get_gecos(name)
if not pre_info:
return False
if roomnumber == pre_info['roomnumber']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['roomnumber'] = roomnumber
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['roomnumber'] != pre_info['roomnumber']:
return str(post_info['roomnumber']) == roomnumber
return False
def chworkphone(name, workphone, **kwargs):
'''
Change the user's Work Phone
CLI Example:
.. code-block:: bash
salt '*' user.chworkphone foo "7735550123"
'''
workphone = str(workphone)
pre_info = _get_gecos(name)
if not pre_info:
return False
if workphone == pre_info['workphone']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['workphone'] = workphone
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['workphone'] != pre_info['workphone']:
return str(post_info['workphone']) == workphone
return False
def chhomephone(name, homephone, **kwargs):
'''
Change the user's Home Phone
CLI Example:
.. code-block:: bash
salt '*' user.chhomephone foo "7735551234"
'''
homephone = str(homephone)
pre_info = _get_gecos(name)
if not pre_info:
return False
if homephone == pre_info['homephone']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['homephone'] = homephone
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
result = __salt__['cmd.run_stdall'](cmd)
state_std(kwargs, result)
post_info = info(name)
if post_info['homephone'] != pre_info['homephone']:
return str(post_info['homephone']) == homephone
return False
def info(name):
'''
Return user information
CLI Example:
.. code-block:: bash
salt '*' user.info root
'''
try:
data = pwd.getpwnam(name)
except KeyError:
return {}
else:
return _format_info(data)
def _format_info(data):
'''
Return user information in a pretty way
'''
# Put GECOS info into a list
gecos_field = data.pw_gecos.split(',', 3)
# Make sure our list has at least four elements
while len(gecos_field) < 4:
gecos_field.append('')
return {'gid': data.pw_gid,
'groups': list_groups(data.pw_name),
'home': data.pw_dir,
'name': data.pw_name,
'passwd': data.pw_passwd,
'shell': data.pw_shell,
'uid': data.pw_uid,
'fullname': gecos_field[0],
'roomnumber': gecos_field[1],
'workphone': gecos_field[2],
'homephone': gecos_field[3]}
def list_groups(name):
'''
Return a list of groups the named user belongs to
CLI Example:
.. code-block:: bash
salt '*' user.list_groups foo
'''
ugrp = set()
# Add the primary user's group
try:
ugrp.add(grp.getgrgid(pwd.getpwnam(name).pw_gid).gr_name)
except KeyError:
# The user's applied default group is undefined on the system, so
# it does not exist
pass
groups = grp.getgrall()
# Now, all other groups the user belongs to
for group in groups:
if name in group.gr_mem:
ugrp.add(group.gr_name)
return sorted(list(ugrp))
def list_users():
'''
Return a list of all users
CLI Example:
.. code-block:: bash
salt '*' user.list_users
'''
return sorted([user.pw_name for user in pwd.getpwall()])
| apache-2.0 | 715,055,845,312,761,200 | 24.425612 | 79 | 0.570624 | false |
jcpeterson/Dallinger | tests/test_recruiters.py | 1 | 28371 | import mock
import pytest
from dallinger.models import Participant
from dallinger.experiment import Experiment
class TestModuleFunctions(object):
@pytest.fixture
def mod(self):
from dallinger import recruiters
return recruiters
def test__get_queue(self, mod):
from rq import Queue
assert isinstance(mod._get_queue(), Queue)
def test_for_experiment(self, mod):
mock_exp = mock.MagicMock(spec=Experiment)
mock_exp.recruiter = mock.sentinel.some_object
assert mod.for_experiment(mock_exp) is mock_exp.recruiter
def test_by_name_with_valid_name(self, mod):
assert isinstance(mod.by_name('CLIRecruiter'), mod.CLIRecruiter)
def test_by_name_with_valid_nickname(self, mod):
assert isinstance(mod.by_name('bots'), mod.BotRecruiter)
def test_by_name_with_invalid_name(self, mod):
assert mod.by_name('blah') is None
def test_for_debug_mode(self, mod, stub_config):
r = mod.from_config(stub_config)
assert isinstance(r, mod.HotAirRecruiter)
def test_recruiter_config_value_used_if_not_debug(self, mod, stub_config):
stub_config.extend({'mode': u'sandbox', 'recruiter': u'CLIRecruiter'})
r = mod.from_config(stub_config)
assert isinstance(r, mod.CLIRecruiter)
def test_debug_mode_trumps_recruiter_config_value(self, mod, stub_config):
stub_config.extend({'recruiter': u'CLIRecruiter'})
r = mod.from_config(stub_config)
assert isinstance(r, mod.HotAirRecruiter)
def test_bot_recruiter_trumps_debug_mode(self, mod, stub_config):
stub_config.extend({'recruiter': u'bots'})
r = mod.from_config(stub_config)
assert isinstance(r, mod.BotRecruiter)
def test_default_is_mturk_recruiter_if_not_debug(self, mod, active_config):
active_config.extend({'mode': u'sandbox'})
r = mod.from_config(active_config)
assert isinstance(r, mod.MTurkRecruiter)
def test_replay_setting_dictates_recruiter(self, mod, active_config):
active_config.extend(
{'replay': True, 'mode': u'sandbox', 'recruiter': u'CLIRecruiter'}
)
r = mod.from_config(active_config)
assert isinstance(r, mod.HotAirRecruiter)
def test_unknown_recruiter_name_raises(self, mod, stub_config):
stub_config.extend({'mode': u'sandbox', 'recruiter': u'bogus'})
with pytest.raises(NotImplementedError):
mod.from_config(stub_config)
class TestRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import Recruiter
return Recruiter()
def test_open_recruitment(self, recruiter):
with pytest.raises(NotImplementedError):
recruiter.open_recruitment()
def test_recruit(self, recruiter):
with pytest.raises(NotImplementedError):
recruiter.recruit()
def test_close_recruitment(self, recruiter):
with pytest.raises(NotImplementedError):
recruiter.close_recruitment()
def test_reward_bonus(self, recruiter):
with pytest.raises(NotImplementedError):
recruiter.reward_bonus('any assignment id', 0.01, "You're great!")
def test_notify_recruited(self, recruiter):
dummy = mock.NonCallableMock()
recruiter.notify_recruited(participant=dummy)
def test_notify_using(self, recruiter):
dummy = mock.NonCallableMock()
recruiter.notify_using(participant=dummy)
def test_external_submission_url(self, recruiter):
assert recruiter.external_submission_url is None
def test_rejects_questionnaire_from_returns_none(self, recruiter):
dummy = mock.NonCallableMock()
assert recruiter.rejects_questionnaire_from(participant=dummy) is None
def test_backward_compat(self, recruiter):
assert recruiter() is recruiter
@pytest.mark.usefixtures('active_config')
class TestCLIRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import CLIRecruiter
yield CLIRecruiter()
def test_recruit_recruits_one_by_default(self, recruiter):
result = recruiter.recruit()
assert len(result) == 1
def test_recruit_results_are_urls(self, recruiter):
assert '/ad?recruiter=cli&assignmentId=' in recruiter.recruit()[0]
def test_recruit_multiple(self, recruiter):
assert len(recruiter.recruit(n=3)) == 3
def test_open_recruitment_recruits_one_by_default(self, recruiter):
result = recruiter.open_recruitment()
assert len(result['items']) == 1
def test_open_recruitment_describes_how_it_works(self, recruiter):
result = recruiter.open_recruitment()
assert 'Search for "New participant requested:"' in result['message']
def test_open_recruitment_multiple(self, recruiter):
result = recruiter.open_recruitment(n=3)
assert len(result['items']) == 3
def test_open_recruitment_results_are_urls(self, recruiter):
result = recruiter.open_recruitment()
assert '/ad?recruiter=cli&assignmentId=' in result['items'][0]
def test_open_recruitment_with_zero(self, recruiter):
result = recruiter.open_recruitment(n=0)
assert result['items'] == []
def test_close_recruitment(self, recruiter):
recruiter.close_recruitment()
def test_approve_hit(self, recruiter):
assert recruiter.approve_hit('any assignment id')
def test_reward_bonus(self, recruiter):
recruiter.reward_bonus('any assignment id', 0.01, "You're great!")
def test_open_recruitment_uses_configured_mode(self, recruiter, active_config):
active_config.extend({'mode': u'new_mode'})
result = recruiter.open_recruitment()
assert 'mode=new_mode' in result['items'][0]
def test_returns_standard_submission_event_type(self, recruiter):
        assert recruiter.submitted_event() == 'AssignmentSubmitted'
@pytest.mark.usefixtures('active_config')
class TestHotAirRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import HotAirRecruiter
yield HotAirRecruiter()
def test_recruit_recruits_one_by_default(self, recruiter):
result = recruiter.recruit()
assert len(result) == 1
def test_recruit_results_are_urls(self, recruiter):
assert '/ad?recruiter=hotair&assignmentId=' in recruiter.recruit()[0]
def test_recruit_multiple(self, recruiter):
assert len(recruiter.recruit(n=3)) == 3
def test_open_recruitment_recruits_one_by_default(self, recruiter):
result = recruiter.open_recruitment()
assert len(result['items']) == 1
def test_open_recruitment_describes_how_it_works(self, recruiter):
result = recruiter.open_recruitment()
assert 'requests will open browser windows' in result['message']
def test_open_recruitment_multiple(self, recruiter):
result = recruiter.open_recruitment(n=3)
assert len(result['items']) == 3
def test_open_recruitment_results_are_urls(self, recruiter):
result = recruiter.open_recruitment()
assert '/ad?recruiter=hotair&assignmentId=' in result['items'][0]
def test_close_recruitment(self, recruiter):
recruiter.close_recruitment()
def test_approve_hit(self, recruiter):
assert recruiter.approve_hit('any assignment id')
def test_reward_bonus(self, recruiter):
recruiter.reward_bonus('any assignment id', 0.01, "You're great!")
def test_open_recruitment_ignores_configured_mode(self, recruiter, active_config):
active_config.extend({'mode': u'new_mode'})
result = recruiter.open_recruitment()
assert 'mode=debug' in result['items'][0]
def test_returns_standard_submission_event_type(self, recruiter):
        assert recruiter.submitted_event() == 'AssignmentSubmitted'
class TestSimulatedRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import SimulatedRecruiter
return SimulatedRecruiter()
def test_recruit_returns_empty_result(self, recruiter):
assert recruiter.recruit() == []
def test_recruit_multiple_returns_empty_result(self, recruiter):
assert recruiter.recruit(n=3) == []
def test_open_recruitment_returns_empty_result(self, recruiter):
assert recruiter.open_recruitment()['items'] == []
def test_open_recruitment_multiple_returns_empty_result(self, recruiter):
assert recruiter.open_recruitment(n=3)['items'] == []
def test_returns_standard_submission_event_type(self, recruiter):
        assert recruiter.submitted_event() == 'AssignmentSubmitted'
def test_close_recruitment(self, recruiter):
assert recruiter.close_recruitment() is None
class TestBotRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import BotRecruiter
with mock.patch.multiple('dallinger.recruiters',
_get_queue=mock.DEFAULT,
get_base_url=mock.DEFAULT) as mocks:
mocks['get_base_url'].return_value = 'fake_base_url'
r = BotRecruiter()
r._get_bot_factory = mock.Mock()
yield r
def test_recruit_returns_list(self, recruiter):
result = recruiter.recruit(n=2)
assert len(result) == 2
def test_recruit_returns_urls(self, recruiter):
result = recruiter.recruit()
assert result[0].startswith('fake_base_url')
def test_open_recruitment_returns_list(self, recruiter):
result = recruiter.open_recruitment(n=2)
assert len(result['items']) == 2
def test_open_recruitment_returns_urls(self, recruiter):
result = recruiter.open_recruitment()
assert result['items'][0].startswith('fake_base_url')
def test_open_recruitment_describes_how_it_works(self, recruiter):
result = recruiter.open_recruitment()
assert "recruitment started using Mock" in result['message']
def test_close_recruitment(self, recruiter):
recruiter.close_recruitment()
def test_approve_hit(self, recruiter):
assert recruiter.approve_hit('any assignment id')
def test_reward_bonus(self, recruiter):
recruiter.reward_bonus('any assignment id', 0.01, "You're great!")
def test_returns_specific_submission_event_type(self, recruiter):
        assert recruiter.submitted_event() == 'BotAssignmentSubmitted'
@pytest.mark.usefixtures('active_config')
class TestMTurkRecruiter(object):
@pytest.fixture
def recruiter(self, active_config):
from dallinger.mturk import MTurkService
from dallinger.recruiters import MTurkRecruiter
with mock.patch.multiple('dallinger.recruiters',
os=mock.DEFAULT,
get_base_url=mock.DEFAULT) as mocks:
mocks['get_base_url'].return_value = 'http://fake-domain'
mocks['os'].getenv.return_value = 'fake-host-domain'
mockservice = mock.create_autospec(MTurkService)
active_config.extend({'mode': u'sandbox'})
r = MTurkRecruiter()
r.mturkservice = mockservice('fake key', 'fake secret', 'fake_region')
r.mturkservice.check_credentials.return_value = True
r.mturkservice.create_hit.return_value = {'type_id': 'fake type id'}
return r
def test_instantiation_fails_with_invalid_mode(self, active_config):
from dallinger.recruiters import MTurkRecruiter
from dallinger.recruiters import MTurkRecruiterException
active_config.extend({'mode': u'nonsense'})
with pytest.raises(MTurkRecruiterException) as ex_info:
MTurkRecruiter()
assert ex_info.match('"nonsense" is not a valid mode')
def test_config_passed_to_constructor_sandbox(self, recruiter):
assert recruiter.config.get('title') == 'fake experiment title'
def test_external_submission_url_sandbox(self, recruiter):
assert 'workersandbox.mturk.com' in recruiter.external_submission_url
def test_external_submission_url_live(self, recruiter):
recruiter.config.set('mode', u'live')
assert 'www.mturk.com' in recruiter.external_submission_url
def test_open_recruitment_returns_one_item_recruitments_list(self, recruiter):
result = recruiter.open_recruitment(n=2)
assert len(result['items']) == 1
def test_open_recruitment_describes_how_it_works(self, recruiter):
result = recruiter.open_recruitment()
assert 'HIT now published to Amazon Mechanical Turk' in result['message']
def test_open_recruitment_returns_urls(self, recruiter):
url = recruiter.open_recruitment(n=1)['items'][0]
assert url == 'https://workersandbox.mturk.com/mturk/preview?groupId=fake type id'
def test_open_recruitment_raises_if_no_external_hit_domain_configured(self, recruiter):
from dallinger.recruiters import MTurkRecruiterException
recruiter.hit_domain = None
with pytest.raises(MTurkRecruiterException):
recruiter.open_recruitment(n=1)
def test_open_recruitment_check_creds_before_calling_create_hit(self, recruiter):
recruiter.open_recruitment(n=1)
recruiter.mturkservice.check_credentials.assert_called_once()
def test_open_recruitment_single_recruitee_builds_hit(self, recruiter):
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_hit.assert_called_once_with(
ad_url='http://fake-domain/ad?recruiter=mturk',
approve_requirement=95,
description=u'fake HIT description',
duration_hours=1.0,
keywords=[u'kw1', u'kw2', u'kw3'],
lifetime_days=1,
max_assignments=1,
notification_url=u'https://url-of-notification-route',
reward=0.01,
title=u'fake experiment title',
us_only=True,
blacklist=[],
annotation='some experiment uid',
)
def test_open_recruitment_creates_qualifications_for_experiment_app_id(self, recruiter):
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_qualification_type.assert_called_once_with(
u'some experiment uid', 'Experiment-specific qualification'
)
def test_open_recruitment_creates_qualifications_for_exp_with_group_name(self, recruiter):
recruiter.config.set('group_name', u'some group name')
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_qualification_type.assert_has_calls([
mock.call(u'some experiment uid', 'Experiment-specific qualification'),
mock.call(u'some group name', 'Experiment group qualification')
], any_order=True)
def test_open_recruitment_creates_no_qualifications_if_so_configured(self, recruiter):
recruiter.config.set('group_name', u'some group name')
recruiter.config.set('assign_qualifications', False)
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_qualification_type.assert_not_called()
def test_open_recruitment_when_qualification_already_exists(self, recruiter):
from dallinger.mturk import DuplicateQualificationNameError
mturk = recruiter.mturkservice
mturk.create_qualification_type.side_effect = DuplicateQualificationNameError
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_hit.assert_called_once()
def test_open_recruitment_with_blacklist(self, recruiter):
recruiter.config.set('qualification_blacklist', u'foo, bar')
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_hit.assert_called_once_with(
ad_url='http://fake-domain/ad?recruiter=mturk',
approve_requirement=95,
description='fake HIT description',
duration_hours=1.0,
lifetime_days=1,
keywords=[u'kw1', u'kw2', u'kw3'],
max_assignments=1,
notification_url='https://url-of-notification-route',
reward=0.01,
title='fake experiment title',
us_only=True,
blacklist=['foo', 'bar'],
annotation='some experiment uid',
)
def test_open_recruitment_is_noop_if_experiment_in_progress(self, a, recruiter):
a.participant()
recruiter.open_recruitment()
recruiter.mturkservice.check_credentials.assert_not_called()
def test_supresses_assignment_submitted(self, recruiter):
assert recruiter.submitted_event() is None
def test_current_hit_id_with_active_experiment(self, a, recruiter):
a.participant(hit_id=u'the hit!')
assert recruiter.current_hit_id() == 'the hit!'
def test_current_hit_id_with_no_active_experiment(self, recruiter):
assert recruiter.current_hit_id() is None
def test_recruit_auto_recruit_on_recruits_for_current_hit(self, recruiter):
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.recruit()
recruiter.mturkservice.extend_hit.assert_called_once_with(
fake_hit_id,
number=1,
duration_hours=1.0
)
def test_recruit_auto_recruit_off_does_not_extend_hit(self, recruiter):
recruiter.config['auto_recruit'] = False
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.recruit()
assert not recruiter.mturkservice.extend_hit.called
def test_recruit_no_current_hit_does_not_extend_hit(self, recruiter):
recruiter.current_hit_id = mock.Mock(return_value=None)
recruiter.recruit()
assert not recruiter.mturkservice.extend_hit.called
def test_recruit_extend_hit_error_is_logged_politely(self, recruiter):
from dallinger.mturk import MTurkServiceException
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.mturkservice.extend_hit.side_effect = MTurkServiceException("Boom!")
with mock.patch('dallinger.recruiters.logger') as mock_logger:
recruiter.recruit()
mock_logger.exception.assert_called_once_with("Boom!")
def test_reward_bonus_is_simple_passthrough(self, recruiter):
recruiter.reward_bonus(
assignment_id='fake assignment id',
amount=2.99,
reason='well done!'
)
recruiter.mturkservice.grant_bonus.assert_called_once_with(
assignment_id='fake assignment id',
amount=2.99,
reason='well done!'
)
def test_reward_bonus_logs_exception(self, recruiter):
from dallinger.mturk import MTurkServiceException
recruiter.mturkservice.grant_bonus.side_effect = MTurkServiceException("Boom!")
with mock.patch('dallinger.recruiters.logger') as mock_logger:
recruiter.reward_bonus('fake-assignment', 2.99, 'fake reason')
mock_logger.exception.assert_called_once_with("Boom!")
def test_approve_hit(self, recruiter):
fake_id = 'fake assignment id'
recruiter.approve_hit(fake_id)
recruiter.mturkservice.approve_assignment.assert_called_once_with(fake_id)
def test_approve_hit_logs_exception(self, recruiter):
from dallinger.mturk import MTurkServiceException
recruiter.mturkservice.approve_assignment.side_effect = MTurkServiceException("Boom!")
with mock.patch('dallinger.recruiters.logger') as mock_logger:
recruiter.approve_hit('fake-hit-id')
mock_logger.exception.assert_called_once_with("Boom!")
@pytest.mark.xfail
def test_close_recruitment(self, recruiter):
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.close_recruitment()
recruiter.mturkservice.expire_hit.assert_called_once_with(
fake_hit_id
)
def test_notify_completed_when_group_name_not_specified(self, recruiter):
participant = mock.Mock(spec=Participant, worker_id='some worker id')
recruiter.notify_completed(participant)
recruiter.mturkservice.increment_qualification_score.assert_called_once_with(
'some experiment uid',
'some worker id',
)
def test_notify_completed_when_group_name_specified(self, recruiter):
participant = mock.Mock(spec=Participant, worker_id='some worker id')
recruiter.config.set('group_name', u'some existing group_name')
recruiter.notify_completed(participant)
recruiter.mturkservice.increment_qualification_score.assert_has_calls([
mock.call('some experiment uid', 'some worker id'),
mock.call('some existing group_name', 'some worker id')
], any_order=True)
def test_notify_completed_nonexistent_qualification(self, recruiter):
from dallinger.mturk import QualificationNotFoundException
participant = mock.Mock(spec=Participant, worker_id='some worker id')
error = QualificationNotFoundException("Ouch!")
recruiter.mturkservice.increment_qualification_score.side_effect = error
# logs, but does not raise:
recruiter.notify_completed(participant)
def test_notify_completed_skips_assigning_qualification_if_so_configured(self, recruiter):
participant = mock.Mock(spec=Participant, worker_id='some worker id')
recruiter.config.set('group_name', u'some existing group_name')
recruiter.config.set('assign_qualifications', False)
recruiter.notify_completed(participant)
recruiter.mturkservice.increment_qualification_score.assert_not_called()
def test_rejects_questionnaire_from_returns_none_if_working(self, recruiter):
participant = mock.Mock(spec=Participant, status="working")
assert recruiter.rejects_questionnaire_from(participant) is None
def test_rejects_questionnaire_from_returns_error_if_already_submitted(self, recruiter):
participant = mock.Mock(spec=Participant, status="submitted")
rejection = recruiter.rejects_questionnaire_from(participant)
assert "already sumbitted their HIT" in rejection
@pytest.mark.usefixtures('active_config')
class TestMTurkLargeRecruiter(object):
@pytest.fixture
def recruiter(self, active_config):
from dallinger.mturk import MTurkService
from dallinger.recruiters import MTurkLargeRecruiter
with mock.patch.multiple('dallinger.recruiters',
os=mock.DEFAULT,
get_base_url=mock.DEFAULT) as mocks:
mocks['get_base_url'].return_value = 'http://fake-domain'
mocks['os'].getenv.return_value = 'fake-host-domain'
mockservice = mock.create_autospec(MTurkService)
active_config.extend({'mode': u'sandbox'})
r = MTurkLargeRecruiter()
r.mturkservice = mockservice('fake key', 'fake secret', 'fake_region')
r.mturkservice.check_credentials.return_value = True
r.mturkservice.create_hit.return_value = {'type_id': 'fake type id'}
return r
def test_open_recruitment_is_noop_if_experiment_in_progress(self, a, recruiter):
a.participant()
recruiter.open_recruitment()
recruiter.mturkservice.check_credentials.assert_not_called()
def test_open_recruitment_single_recruitee(self, recruiter):
recruiter.open_recruitment(n=1)
recruiter.mturkservice.create_hit.assert_called_once_with(
ad_url='http://fake-domain/ad?recruiter=mturklarge',
approve_requirement=95,
description='fake HIT description',
duration_hours=1.0,
keywords=['kw1', 'kw2', 'kw3'],
lifetime_days=1,
max_assignments=10,
notification_url='https://url-of-notification-route',
reward=0.01,
title='fake experiment title',
us_only=True,
blacklist=[],
annotation='some experiment uid',
)
def test_more_than_ten_can_be_recruited_on_open(self, recruiter):
recruiter.open_recruitment(n=20)
recruiter.mturkservice.create_hit.assert_called_once_with(
ad_url='http://fake-domain/ad?recruiter=mturklarge',
approve_requirement=95,
description='fake HIT description',
duration_hours=1.0,
keywords=['kw1', 'kw2', 'kw3'],
lifetime_days=1,
max_assignments=20,
notification_url='https://url-of-notification-route',
reward=0.01,
title='fake experiment title',
us_only=True,
blacklist=[],
annotation='some experiment uid',
)
def test_recruit_participants_auto_recruit_on_recruits_for_current_hit(self, recruiter):
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.open_recruitment(n=1)
recruiter.recruit(n=9)
recruiter.mturkservice.extend_hit.assert_not_called()
recruiter.recruit(n=1)
recruiter.mturkservice.extend_hit.assert_called_once_with(
'fake HIT id',
duration_hours=1.0,
number=1
)
def test_recruiting_partially_from_preallocated_pool(self, recruiter):
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.open_recruitment(n=1)
recruiter.recruit(n=5)
recruiter.mturkservice.extend_hit.assert_not_called()
recruiter.recruit(n=10)
recruiter.mturkservice.extend_hit.assert_called_once_with(
'fake HIT id',
duration_hours=1.0,
number=6
)
def test_recruit_auto_recruit_off_does_not_extend_hit(self, recruiter):
recruiter.config['auto_recruit'] = False
fake_hit_id = 'fake HIT id'
recruiter.current_hit_id = mock.Mock(return_value=fake_hit_id)
recruiter.recruit()
assert not recruiter.mturkservice.extend_hit.called
@pytest.mark.usefixtures('active_config', 'db_session')
class TestMultiRecruiter(object):
@pytest.fixture
def recruiter(self, active_config):
from dallinger.recruiters import MultiRecruiter
active_config.extend({'recruiters': u'cli: 2, hotair: 1'})
return MultiRecruiter()
def test_parse_spec(self, recruiter):
assert recruiter.spec == [
('cli', 2),
('hotair', 1),
]
def test_pick_recruiter(self, recruiter):
subrecruiter = recruiter.pick_recruiter()
assert subrecruiter.nickname == 'cli'
subrecruiter = recruiter.pick_recruiter()
assert subrecruiter.nickname == 'cli'
subrecruiter = recruiter.pick_recruiter()
assert subrecruiter.nickname == 'hotair'
with pytest.raises(Exception):
recruiter.pick_recruiter()
def test_open_recruitment(self, recruiter):
result = recruiter.open_recruitment(n=3)
assert len(result['items']) == 3
assert result['items'][0].startswith('http://0.0.0.0:5000/ad?recruiter=cli')
assert result['items'][1].startswith('http://0.0.0.0:5000/ad?recruiter=cli')
assert result['items'][2].startswith('http://0.0.0.0:5000/ad?recruiter=hotair')
def test_recruit(self, recruiter):
result = recruiter.recruit(n=3)
assert len(result) == 3
assert result[0].startswith('http://0.0.0.0:5000/ad?recruiter=cli')
assert result[1].startswith('http://0.0.0.0:5000/ad?recruiter=cli')
assert result[2].startswith('http://0.0.0.0:5000/ad?recruiter=hotair')
def test_close_recruitment(self, recruiter):
patch1 = mock.patch('dallinger.recruiters.CLIRecruiter.close_recruitment')
patch2 = mock.patch('dallinger.recruiters.HotAirRecruiter.close_recruitment')
with patch1 as f1, patch2 as f2:
recruiter.close_recruitment()
f1.assert_called_once()
f2.assert_called_once()
| mit | -7,706,319,331,302,367,000 | 39.41453 | 94 | 0.661944 | false |
0sw4l/Cuke | DjFacilito/settings.py | 1 | 2092 | """
Django settings for DjFacilito project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u-gyp9q#^fybll8fw5#kj10#d(^j)kek5462zm@r1u3bm#g^^2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'TodoList',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'DjFacilito.urls'
WSGI_APPLICATION = 'DjFacilito.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es-CO'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit | 2,898,602,150,506,420,700 | 23.325581 | 71 | 0.724187 | false |
murarth/cashan | cashan_game.py | 1 | 103880 | #!/usr/bin/python3
# -*- coding: utf-8
import argparse
from collections import Counter, defaultdict, deque
import curses
import itertools
import json
import os
import random
import socket
import sys
import threading
import time
from cashan import *
from game import *
from network import *
VERSION = '0.0.4'
# High frequency numbers
(COLOR_HIGH,
# Items in 'Buy' that cannot be bought or cards in 'Play' that cannot be played
COLOR_INVALID,
# Terrain colors
COLOR_HILLS, COLOR_FOREST, COLOR_MOUNTAINS,
COLOR_FIELDS, COLOR_PASTURE, COLOR_DESERT,
# Development card colors
COLOR_KNIGHT, COLOR_PROGRESS, COLOR_VICTORY,
# Color for special cards, LargestArmy and LongestRoad
COLOR_SPECIAL,
# Colors for player units
COLOR_PLAYER0, COLOR_PLAYER1, COLOR_PLAYER2, COLOR_PLAYER3,
# 'any' (for harbors) uses no color
COLOR_ANY,
) = range(1, 18)
# Resource colors match their terrain
(COLOR_BRICK, COLOR_LUMBER, COLOR_ORE, COLOR_GRAIN, COLOR_WOOL) = (
COLOR_HILLS, COLOR_FOREST, COLOR_MOUNTAINS,
COLOR_FIELDS, COLOR_PASTURE)
# State.priority values
PRI_NORMAL, PRI_HIGH = range(2)
class CashanGame(Game):
GAME_TITLE = 'Cashan'
def __init__(self, stdscr, args, config, state, play_state, connection = None):
super().__init__(stdscr)
self.args = args
self.config = config
# Connection to multiplayer server; None when offline
self.connection = connection
# Starting game state
self.cashan = state
# Starting state of gameplay
self.play_state = play_state
# Messages received from server
self.messages = deque()
self.key_callbacks = {
ord('q'): self.quit_game,
ord('\n'): self.advance_actions,
ord(' '): self.skip_actions,
}
def init_colors(self):
super().init_colors()
curses.init_pair(COLOR_ANY, -1, -1)
curses.init_pair(COLOR_HIGH, curses.COLOR_RED, -1)
curses.init_pair(COLOR_INVALID, curses.COLOR_RED, -1)
curses.init_pair(COLOR_HILLS, curses.COLOR_RED, -1)
curses.init_pair(COLOR_FOREST, curses.COLOR_GREEN, -1)
curses.init_pair(COLOR_MOUNTAINS, curses.COLOR_BLUE, -1)
curses.init_pair(COLOR_FIELDS, curses.COLOR_MAGENTA, -1)
curses.init_pair(COLOR_PASTURE, curses.COLOR_CYAN, -1)
curses.init_pair(COLOR_DESERT, curses.COLOR_YELLOW, -1)
curses.init_pair(COLOR_KNIGHT, curses.COLOR_MAGENTA, -1)
curses.init_pair(COLOR_PROGRESS, curses.COLOR_GREEN, -1)
curses.init_pair(COLOR_VICTORY, curses.COLOR_YELLOW, -1)
curses.init_pair(COLOR_SPECIAL, curses.COLOR_CYAN, -1)
curses.init_pair(COLOR_PLAYER0, curses.COLOR_RED, -1)
curses.init_pair(COLOR_PLAYER1, curses.COLOR_BLUE, -1)
curses.init_pair(COLOR_PLAYER2, curses.COLOR_GREEN, -1)
curses.init_pair(COLOR_PLAYER3, curses.COLOR_MAGENTA, -1)
def start_game(self):
# Player who is currently taking their turn
self.player_turn = self.play_state.player_turn
# Game phase; 'setup' or 'play'
self.phase = self.play_state.phase
# Sequence number of any active trade offer
self.active_trade = None
# Index into cashan.players that refers to the human player
self.self_player = self.play_state.self_player
# Locally simulated players
self.ai_players = self.play_state.ai_players
# Messages to indicate actions of other players
self.action_messages = deque()
# Whether to automatically skip action messages
self.skip_actions_flag = False
self.states = []
if self.connection is not None:
self.start_connection_thread()
# And off we go!
self.push_state(self.start_turn())
def quit_game(self):
if self.connection is None:
super().quit_game()
else:
pass # TODO: Do something here
def start_connection_thread(self):
threading.Thread(target = self.connection_loop, daemon = True).start()
def connection_loop(self):
while not self.quit:
msg = self.connection.recv_message()
if msg is None:
self.messages.append({ 'action': 'error', 'error': 'connection closed' })
break
action = msg.get('action')
if action == 'ping':
self.connection.write_message({ 'action': 'pong' })
continue
self.messages.append(msg)
def send_message(self, msg):
if self.connection is not None:
self.connection.write_message({ 'action': 'send', 'body': msg })
def player_is_local(self, player):
return player == self.self_player or player in self.ai_players
@property
def current_player(self):
return self.cashan.players[self.player_turn]
def next_turn(self):
'''Advances player turn and calls start_turn'''
self.skip_actions_flag = False
if self.player_turn is None:
self.player_turn = 0
elif self.phase == 'setup-2':
self.player_turn = (self.player_turn - 1) % len(self.cashan.players)
else:
self.player_turn = (self.player_turn + 1) % len(self.cashan.players)
return self.start_turn()
def save_game_state(self):
try:
save_game(self.args.save_name, self)
except Exception as e:
log('failed to save game:', e)
def start_turn(self):
'''Returns the starting State for the current player's turn'''
if self.player_turn is None:
self.player_turn = 0
if self.phase == 'setup' and self.setup_ended(1):
# For fairness, the second half of setup begins with the last
# player and turns are taken in reverse order.
self.phase = 'setup-2'
self.player_turn = len(self.cashan.players) - 1
elif self.phase == 'setup-2' and self.setup_ended(2):
self.start_play()
if self.player_turn == 0 and self.args.save_name:
self.save_game_state()
if self.player_is_local(self.player_turn):
if self.phase.startswith('setup'):
state = BuildSettlement(self,
[(pos, intr) for pos, intr in self.cashan.starting_positions
if not self.cashan.building_exists(pos, intr)])
elif self.phase == 'play':
state = StartTurn(self)
else:
raise Exception('start turn on phase {!r}'.format(self.phase))
else:
state = WaitRemote(self)
return state
def setup_ended(self, n):
settlements = Counter(o.owner
for p, o in self.cashan.buildings.items())
return all(settlements[p] == n for p in range(len(self.cashan.players)))
def start_play(self):
'''Called to end the 'setup' phase and begin the 'play' phase'''
self.phase = 'play'
# 'setup-2' is in reverse order, so we set the normal order again.
self.player_turn = 0
players = self.cashan.players
resources = { n: resource_cards(0) for n in range(len(players)) }
# Award resources to each player for each tile adjacent to a settlement
for (pos, intr), bldg in self.cashan.buildings:
for p, _ in adjacent_to_intersection(pos, intr):
cell = self.cashan.grid.get(p)
if cell is not None and cell.terrain.resource is not None:
resources[bldg.owner][cell.terrain.resource] += 1
for player, res in resources.items():
for r, n in res.items():
self.cashan.resources[r] -= n
players[player].resources[r] += n
self.add_action_message(player, 'resource_produced', res)
def get_state(self, ty):
if self.states:
state = self.states[-1]
if isinstance(state, ty):
return state
raise Exception('get_state ty expected {}; found {!r}'
.format(ty.__name__, state))
raise Exception('get_state ty expected {}; found no state'.format(ty.__name__))
def pop_state(self, ty):
state = self.states.pop()
if not isinstance(state, ty):
raise Exception('expected state of type {}; found {!r}'
.format(ty.__name__, state))
def push_state(self, state):
'''
Registers a state object to preferentially receive user input.
        The state's player_input method accepts a single argument,
        the character input value, and returns one of the following:
CONSUME - Consume the event and maintain state.
PASS - Pass the event to the next and maintain state.
DIE - Consume the event and remove state.
'''
if state is None:
raise RuntimeError('None state')
self.states.append(state)
def remove_state(self, ty):
for i in reversed(range(len(self.states))):
if isinstance(self.states[i], ty):
del self.states[i]
break
else:
raise Exception('no state of type {} found; states are {!r}'
.format(ty.__name__, self.states))
def handle_input(self, ch):
self.queue_redraw = True
consume = False
# TODO: Skipping messages on your own turn will cause too much skipping.
# A better concept of "skipping" is required to resolve this hack.
self.skip_actions_flag = False
if not self.action_messages:
if self.states:
i = len(self.states) - 1
state = self.states[i]
if state.accepts_input(self.self_player):
ret = state.player_input(ch)
if ret == DIE:
consume = True
del self.states[i]
elif ret == CONSUME:
consume = True
if not consume:
cb = self.key_callbacks.get(ch)
if cb:
cb()
self.handle_events()
if not self.states and not self.action_messages:
self.end_turn()
def handle_events(self):
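        '''
        Processes queued server messages and lets AI players act on the
        current state, ending the turn when nothing remains to handle.
        '''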
while not self.action_messages and self.messages:
msg = self.messages.popleft()
self.handle_remote(msg)
self.queue_redraw = True
if not self.action_messages and self.states:
i = len(self.states) - 1
state = self.states[i]
for p in self.ai_players:
if state.accepts_input(p):
ret = ai_driver(self, p, state)
if ret == CONSUME:
del self.states[i]
break
elif ret == PASS:
pass
else:
raise RuntimeError('ai_driver returned {!r}'.format(ret))
if not self.states and not self.action_messages:
self.end_turn()
def after_tick(self):
super().after_tick()
self.handle_events()
def handle_remote(self, msg):
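        '''Applies a message received from the multiplayer server to the local game state'''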
action = msg.get('action')
if action == 'error':
raise ClientError(msg.get('error'))
elif action == 'leave':
name = msg.get('name')
self.set_message('{} has left the game'.format(name), None)
elif action == 'acquire_resources':
self.acquire_resources({ Resource.get(r): n
for r, n in msg.get('resources', dict).items() })
elif action == 'declare_victory':
self.declare_victory()
elif action == 'dev_card':
self.buy_development_card()
elif action == 'end_turn':
self.end_turn()
elif action == 'move_robber':
self.cashan.robber = msg.get('pos', Position)
self.add_action_message(self.player_turn, 'move_robber', ())
elif action == 'place_building':
item = Item.get(msg.get('type'))
self.place_building_by(msg.get('player', int),
item, msg.get('pos', Position), msg.get('intr', Intr))
elif action == 'place_road':
self.place_road_by(msg.get('player', int),
msg.get('pos', Position), msg.get('edge', Edge))
elif action == 'play_card':
card = Development.get(msg.get('card'))
self.play_card(card)
elif action == 'propose_trade':
mode = msg.get('mode')
trade_id = msg.get('trade_id', int)
n = msg.get('n', int)
resource = Resource.get(msg.get('resource'))
self.propose_trade(mode, n, resource, trade_id = trade_id)
elif action == 'purchase':
item = Item.get(msg.get('item'))
self.player_purchase(item)
elif action == 'roll':
self.handle_roll(msg.get('n', int))
elif action == 'set_discard':
player = msg.get('player', int)
resources = { Resource.get(r): n
for r, n in msg.get('resources', dict).items() }
state = self.get_state(HalveResources)
if state.set_discard(player, resources):
self.remove_state(HalveResources)
elif action == 'steal':
self.resource_stolen(self.player_turn,
msg.get('target', int), Resource.get(msg.get('resource')))
elif action == 'steal_fail':
self.add_action_message(self.player_turn, 'steal_fail',
msg.get('target', int))
elif action == 'take_resource':
self.take_all_resource(Resource.get(msg.get('resource')))
elif action == 'trade_bank':
n_give, r_give = msg.get('give', list)
n_recv, r_recv = msg.get('recv', list)
self.trade_with_bank(
(n_give, Resource.get(r_give)),
(n_recv, Resource.get(r_recv)))
elif action == 'trade_offer':
player = msg.get('player', int)
n = msg.get('n', int)
resource = Resource.get(msg.get('resource'))
trade_id = msg.get('trade_id', int)
self.trade_offer(player, n, resource, trade_id = trade_id)
elif action == 'trade_player':
other = msg.get('other', int)
n_give, r_give = msg.get('give', list)
n_recv, r_recv = msg.get('recv', list)
self.trade_with_player(other,
n_give, Resource.get(r_give),
n_recv, Resource.get(r_recv))
elif action == 'trade_reject':
player = msg.get('player', int)
trade_id = msg.get('trade_id', int)
self.reject_trade_offer(player, trade_id = trade_id)
elif action == 'withdraw_trade':
mode = msg.get('mode')
self.withdraw_trade(mode)
else:
raise Exception('unrecognized remote action: {!r}'.format(action))
def prompt_confirmation(self, msg, cb):
'''
Prompts for a yes or no response. If input 'y' is received,
the given callback is called with no arguments. If any other
input is received, input grab is released and nothing is called.
'''
# TODO: Do this without altering game state.
raise NotImplementedError
def declare_victory(self):
self.phase = 'end'
self.push_state(Victory(self))
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'declare_victory' })
def get_placing(self):
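        '''
        Returns the final standings as a list of dicts with 'player',
        'points', and 'place' keys, winner first; non-winning players
        tied on points share a place.
        '''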
winner = self.player_turn
places = [{ 'player': i, 'points': self.cashan.count_victory_points(p) }
for i, p in enumerate(self.cashan.players)]
places.sort(key = lambda p: (p['player'] == winner, p['points']),
reverse = True)
prev = None
prev_place = None
for i, p in enumerate(places, 1):
if prev_place != 1 and p['points'] == prev:
p['place'] = prev_place
else:
p['place'] = i
prev = p['points']
prev_place = i
return places
def end_turn(self):
'''Called when a player's turn has ended'''
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'end_turn' })
else:
self.pop_state(WaitRemote)
state = self.next_turn()
if state is not None:
self.states.append(state)
def add_action_message(self, player, action, params):
if not self.skip_actions_flag:
self.action_messages.append((player, action, params))
self.queue_redraw = True
def advance_actions(self):
self.skip_actions_flag = False
if self.action_messages:
self.action_messages.popleft()
self.redraw()
def skip_actions(self):
if self.action_messages:
self.skip_actions_flag = True
self.action_messages.clear()
def push_action(self, player, msg):
if not self.skip_actions_flag:
self.action_messages.append((player, msg))
self.redraw()
def buy_development_card(self):
if self.player_is_local(self.player_turn):
self.player_purchase(DevelopmentCard)
devc = self.cashan.development_cards.pop()
self.current_player.development_cards[devc] += 1
self.add_action_message(self.player_turn, 'dev_card', devc)
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'dev_card' })
return devc
def place_settlement(self, pos, intr):
'''Places a settlement as the current player'''
if not self.phase.startswith('setup'):
self.player_purchase(Settlement)
self.place_building_by(self.player_turn, Settlement, pos, intr)
def place_city(self, pos, intr):
self.player_purchase(City)
self.place_building_by(self.player_turn, City, pos, intr)
def place_building_by(self, player, item: Item, pos, intr):
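        '''Places a settlement or city for the given player and rechecks Longest Road'''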
p = self.cashan.players[player]
if item is Settlement:
dec(p, 'settlements')
elif item is City:
dec(p, 'cities')
p.settlements += 1
else:
raise Exception('unexpected building type: {!r}'.format(item))
self.cashan.buildings[(pos, intr)] = Object(item, player)
if player != self.self_player:
self.add_action_message(player, 'place_building', (item, pos, intr))
if self.player_is_local(player):
self.send_message({ 'action': 'place_building',
'player': player, 'type': item.name, 'pos': pos, 'intr': intr })
self.check_longest_road()
def place_road(self, pos, edge, *, no_cost = False):
if not no_cost:
self.player_purchase(Road)
self.place_road_by(self.player_turn, pos, edge)
def place_road_by(self, player, pos, edge):
p = self.cashan.players[player]
dec(p, 'roads')
self.cashan.roads[(pos, edge)] = Object(Road, player)
if player != self.self_player:
self.add_action_message(player, 'place_road', (pos, edge))
if self.player_is_local(player):
self.send_message({ 'action': 'place_road',
'player': player, 'pos': pos, 'edge': edge })
self.check_longest_road()
def check_largest_army(self):
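        '''
        Checks whether a player should be awarded the Largest Army
        card and, if so, awards it.
        '''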
players = self.cashan.players
if LargestArmy in self.cashan.special_cards:
pl = [(i, p.played_development_cards[Knight])
for i, p in enumerate(players)]
pl.sort(key = lambda p: p[1], reverse = True)
p_max, n_max = pl[0]
if n_max >= MIN_LARGEST_ARMY:
self.cashan.special_cards.remove(LargestArmy)
players[p_max].special_cards.add(LargestArmy)
self.add_action_message(p_max, 'largest_army', (n_max, None))
else:
pl = [(i, p.played_development_cards[Knight],
LargestArmy in p.special_cards)
for i, p in enumerate(players)]
# In case of a tie, the player already holding LargestArmy wins.
pl.sort(key = lambda i: i[1:], reverse = True)
p_max, n_max, has_largest = pl[0]
if not has_largest:
idx = index_of(pl, lambda p: p[2])
taken = pl[idx][0]
players[taken].special_cards.remove(LargestArmy)
players[p_max].special_cards.add(LargestArmy)
self.add_action_message(p_max, 'largest_army', (n_max, taken))
def check_longest_road(self):
'''
Checks whether a player should be awarded the Longest Road
card and, if so, awards it.
'''
players = self.cashan.players
if LongestRoad in self.cashan.special_cards:
pl = [(i, self.cashan.longest_road(i))
for i in range(len(players))]
pl.sort(key = lambda i: i[1], reverse = True)
p_max, n_max = pl[0]
if n_max >= MIN_LONGEST_ROAD:
self.cashan.special_cards.remove(LongestRoad)
players[p_max].special_cards.add(LongestRoad)
self.add_action_message(p_max, 'longest_road', (n_max, None))
else:
pl = [(i, self.cashan.longest_road(i),
LongestRoad in players[i].special_cards)
for i in range(len(players))]
# In case of a tie, the player already holding LongestRoad wins.
pl.sort(key = lambda i: i[1:], reverse = True)
p_max, n_max, has_longest = pl[0]
if not has_longest:
idx = index_of(pl, lambda p: p[2])
taken = pl[idx][0]
players[taken].special_cards.remove(LongestRoad)
players[p_max].special_cards.add(LongestRoad)
self.add_action_message(p_max, 'longest_road', (n_max, taken))
def play_card(self, card):
dec_item(self.current_player.development_cards, card)
if self.player_turn != self.self_player:
self.add_action_message(self.player_turn, 'play_card', card)
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'play_card', 'card': card.name })
if card is Knight:
self.current_player.played_development_cards[Knight] += 1
self.activate_robber(False)
self.check_largest_army()
elif card is Monopoly:
if self.player_is_local(self.player_turn):
self.push_state(SelectResource(self))
elif card is RoadBuilding:
if self.player_is_local(self.player_turn):
self.push_state(BuildTwoRoads(self))
elif card is YearOfPlenty:
if self.player_is_local(self.player_turn):
self.push_state(SelectResourceCards(self))
def take_all_resource(self, resource):
'''
        Transfers all of the given resource from every other player
        to the current player
'''
player = self.current_player
total = 0
for i, other in enumerate(self.cashan.players):
if other is not player:
n = other.resources[resource]
if n:
total += n
other.resources[resource] = 0
player.resources[resource] += n
self.add_action_message(self.player_turn,
'take_resource', (i, resource, n))
if not total:
self.add_action_message(self.player_turn, 'take_none', resource)
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'take_resource',
'resource': resource.name })
def acquire_resources(self, resources):
'''
Gives the current player the given set of resources from the "bank."
'''
player = self.current_player
for r, n in resources.items():
player.resources[r] += n
self.cashan.resources[r] -= n
self.add_action_message(self.player_turn, 'acquire_resources', resources)
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'acquire_resources',
'resources': { r.name: n for r, n in resources.items() if n } })
def player_discard(self, player, resources):
'''Discards resources from a player's hand'''
p = self.cashan.players[player]
for r, n in resources.items():
p.resources[r] -= n
self.cashan.resources[r] += n
self.add_action_message(player, 'discard_resource', resources)
def player_set_discard(self, player, resources):
if self.player_is_local(player):
self.send_message({ 'action': 'set_discard',
'player': player,
'resources': { r.name: n for r, n in resources.items() } })
def propose_trade(self, mode, n, resource, *, trade_id = None):
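        '''
        Opens a trade offer or request. A new trade id is generated when
        the proposal originates locally; otherwise the id supplied by the
        remote player is used.
        '''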
self.push_state(TradeOffer(self, mode, n, resource))
if self.player_is_local(self.player_turn):
self.active_trade = trade_id = gen_state_id()
self.send_message({ 'action': 'propose_trade', 'mode': mode,
'trade_id': trade_id, 'n': n, 'resource': resource.name })
else:
if trade_id is None:
raise Exception('remote propose_trade missing trade_id')
self.active_trade = trade_id
def reject_trade_offer(self, player, *, trade_id = None):
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'trade_reject',
'trade_id': self.active_trade,
'player': player })
else:
if trade_id == self.active_trade:
state = self.get_state(TradeOffer)
state.reject_offer(player = player)
def trade_offer(self, player, n, resource, *, trade_id = None):
if self.player_is_local(player):
self.send_message({ 'action': 'trade_offer',
'trade_id': self.active_trade,
'player': player, 'n': n, 'resource': resource.name })
else:
if trade_id == self.active_trade:
state = self.get_state(TradeOffer)
state.submit_offer(player, n, resource)
def withdraw_trade(self, mode):
self.active_trade = None
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'withdraw_trade', 'mode': mode })
else:
if self.player_turn != self.self_player:
self.add_action_message(self.player_turn, 'withdraw_trade', mode)
self.pop_state(TradeOffer)
def trade_with_bank(self, give, recv):
player = self.current_player
n_give, r_give = give
n_recv, r_recv = recv
player.resources[r_give] -= n_give
player.resources[r_recv] += n_recv
self.cashan.resources[r_give] += n_give
self.cashan.resources[r_recv] -= n_recv
if self.player_turn != self.self_player:
self.add_action_message(self.player_turn, 'trade_bank', (give, recv))
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'trade_bank',
'give': [n_give, r_give.name],
'recv': [n_recv, r_recv.name] })
def trade_with_player(self, other, n_give, r_give, n_recv, r_recv):
player = self.current_player
other_p = self.cashan.players[other]
player.resources[r_give] -= n_give
player.resources[r_recv] += n_recv
other_p.resources[r_recv] -= n_recv
other_p.resources[r_give] += n_give
self.add_action_message(self.player_turn, 'trade_player',
(other, n_give, r_give, n_recv, r_recv))
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'trade_player', 'other': other,
'give': (n_give, r_give.name), 'recv': (n_recv, r_recv.name) })
else:
self.pop_state(TradeOffer)
def player_roll(self):
n = roll()
self.send_message({ 'action': 'roll', 'n': n })
self.handle_roll(n)
def handle_roll(self, n):
self.add_action_message(self.player_turn, 'roll', n)
if n == 7:
self.activate_robber(halve = True)
else:
self.produce_resources(n)
def produce_resources(self, n):
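        '''
        Distributes resources produced by a roll of n. The tile under the
        robber produces nothing, and a resource is skipped entirely when
        the bank cannot cover every recipient (unless only one player
        would receive it).
        '''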
robber = self.cashan.robber
n_players = len(self.cashan.players)
# { resource: { player: n } }
resources = resource_cards(defaultdict(int))
# [ player: { resource: n } ]
player_r = [resource_cards(0) for i in range(n_players)]
# Resources to skip because there aren't enough for everyone
skip = []
for pos, cell in self.cashan.grid:
if cell.number == n and pos != robber:
for pos, intr in intersections(pos):
bldg = self.cashan.buildings.get((pos, intr))
if bldg is not None:
n_res = 1 if bldg.ty is Settlement else 2
resources[cell.terrain.resource][bldg.owner] += n_res
for r, counts in resources.items():
if sum(counts.values()) > self.cashan.resources[r]:
# If only one player is receiving, give them what's left
if len(counts) == 1:
pl = next(iter(counts.keys()))
player_r[pl][r] = self.cashan.resources[r]
else:
skip.append(r)
else:
for pl, n in counts.items():
player_r[pl][r] = n
for i, res in enumerate(player_r):
if any(res.values()):
for r, n in res.items():
self.cashan.players[i].resources[r] += n
self.cashan.resources[r] -= n
self.add_action_message(i, 'resource_produced', res)
for r in skip:
self.add_action_message(None, 'resource_exhausted', r)
def activate_robber(self, halve):
'''
Activates the robber, either by rolling a '7' or the playing
of a Knight card.
In the former case only, player resources may be halved.
'''
if halve:
if self.halve_resources():
# HalveResources will trigger further action upon completion
return
if self.player_is_local(self.player_turn):
self.push_state(SelectCell(self,
'Move robber', self.move_robber, deny = self.cashan.robber))
def move_robber(self, pos):
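        '''
        Moves the robber to pos and prompts the current player to steal
        from an opponent with a building adjacent to the new position.
        '''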
self.cashan.robber = pos
self.send_message({ 'action': 'move_robber', 'pos': pos })
if self.player_turn != self.self_player:
self.add_action_message(self.player_turn, 'move_robber', ())
targets = set()
for pos, intr in intersections(pos):
obj = self.cashan.buildings.get((pos, intr))
if obj is not None and obj.owner != self.player_turn:
targets.add(obj.owner)
targets = list(targets)
if self.player_is_local(self.player_turn):
if len(targets) == 1:
self.steal_resource(targets[0])
elif len(targets) > 1:
self.push_state(StealFrom(self, targets))
def steal_resource(self, target):
'''Steals one random resource card from the target player'''
target_p = self.cashan.players[target]
choices = sum([[r] * n for r, n in target_p.resources.items()], [])
if not choices:
self.add_action_message(self.player_turn, 'steal_fail', target)
self.send_message({ 'action': 'steal_fail', 'target': target })
else:
r = random.choice(choices)
target_p.resources[r] -= 1
self.current_player.resources[r] += 1
self.add_action_message(self.player_turn, 'steal', (target, r))
self.send_message({ 'action': 'steal',
'target': target, 'resource': r.name })
def resource_stolen(self, player, target, resource):
p = self.cashan.players[player]
t = self.cashan.players[target]
p.resources[resource] += 1
t.resources[resource] -= 1
self.add_action_message(player, 'steal', (target, resource))
def halve_resources(self):
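        '''
        Pushes a HalveResources state if any player holds more than 7
        resource cards; returns True if discards are required.
        '''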
players = {}
for i, p in enumerate(self.cashan.players):
n = sum(p.resources.values())
if n > 7:
players[i] = n // 2
if players:
self.push_state(HalveResources(self, players))
return True
return False
def road_places(self):
'''Returns a list of (pos, edge) where the player may build a Road'''
res = set()
for (rpos, edge), obj in self.cashan.roads:
if obj.owner == self.player_turn:
for pos, e in edge_adjacent(rpos, edge):
if self.cashan.can_build_road(self.player_turn, pos, e):
res.add((pos, e))
return list(res)
def settlement_places(self):
'''Returns a list of (pos, intr) where the player may build a Settlement'''
res = set()
for (rpos, edge), obj in self.cashan.roads:
if obj.owner == self.player_turn:
for pos, intr in edge_intersections(rpos, edge):
if self.cashan.can_build_settlement(self.player_turn, pos, intr):
res.add((pos, intr))
return list(res)
def city_places(self):
'''Returns a list of (pos, intr) where the player may build a City'''
res = []
for pos, obj in self.cashan.buildings:
if obj.ty is Settlement and obj.owner == self.player_turn:
res.append(pos)
return res
def player_purchase(self, item: Item):
'''
        Purchases an item for the current player.
Resources are transferred from the player to the "bank".
Raises an exception if the player lacks the resources.
'''
player = self.cashan.players[self.player_turn]
for n, res in item.cost:
if player.resources[res] < n:
raise Exception('insufficient {} to purchase {}'
.format(res.name, item.name))
player.resources[res] -= n
self.cashan.resources[res] += n
if self.player_is_local(self.player_turn):
self.send_message({ 'action': 'purchase', 'item': item.name })
def draw_display(self, y, x):
brick = self.cashan.resources[Brick]
lumber = self.cashan.resources[Lumber]
ore = self.cashan.resources[Ore]
grain = self.cashan.resources[Grain]
wool = self.cashan.resources[Wool]
dev = len(self.cashan.development_cards)
# Draw card counts owned by "banker"
win = self.stdscr
win.addstr(1, 2, 'Available(Dev[{:>2}]'.format(dev))
x_off = 19
if LargestArmy in self.cashan.special_cards:
win.addstr(1, x_off + 1, 'LA', curses.color_pair(COLOR_SPECIAL))
x_off += 3
if LongestRoad in self.cashan.special_cards:
win.addstr(1, x_off + 1, 'LR', curses.color_pair(COLOR_SPECIAL))
x_off += 3
win.addstr(1, x_off, ')')
win.addstr(2, 2, 'B', curses.color_pair(COLOR_BRICK))
win.addstr(2, 3, '[{:>2}]'.format(brick))
win.addstr(2, 8, 'L', curses.color_pair(COLOR_LUMBER))
win.addstr(2, 9, '[{:>2}]'.format(lumber))
win.addstr(2, 14, 'O', curses.color_pair(COLOR_ORE))
win.addstr(2, 15, '[{:>2}]'.format(ore))
win.addstr(2, 20, 'G', curses.color_pair(COLOR_GRAIN))
win.addstr(2, 21, '[{:>2}]'.format(grain))
win.addstr(2, 26, 'W', curses.color_pair(COLOR_WOOL))
win.addstr(2, 27, '[{:>2}]'.format(wool))
player = self.cashan.players[self.self_player]
knight = player.development_cards[Knight]
progress = sum(n for c, n in player.development_cards.items()
if isinstance(c, Progress))
victory = player.development_cards[VictoryPoint]
total_victory = self.cashan.count_victory_points(player)
# Draw cards owned by player
win.addstr(4, 2, 'Hand(')
win.addstr(4, 7, 'K', curses.color_pair(COLOR_KNIGHT))
win.addstr(4, 8, '[{:>2}]'.format(knight))
win.addstr(4, 13, 'P', curses.color_pair(COLOR_PROGRESS))
win.addstr(4, 14, '[{:>2}]'.format(progress))
win.addstr(4, 19, 'V', curses.color_pair(COLOR_VICTORY))
win.addstr(4, 20, '[{:>2}])'.format(victory))
brick = player.resources[Brick]
lumber = player.resources[Lumber]
ore = player.resources[Ore]
grain = player.resources[Grain]
wool = player.resources[Wool]
win.addstr(5, 2, 'B', curses.color_pair(COLOR_BRICK))
win.addstr(5, 3, '[{:>2}]'.format(brick))
win.addstr(5, 8, 'L', curses.color_pair(COLOR_LUMBER))
win.addstr(5, 9, '[{:>2}]'.format(lumber))
win.addstr(5, 14, 'O', curses.color_pair(COLOR_ORE))
win.addstr(5, 15, '[{:>2}]'.format(ore))
win.addstr(5, 20, 'G', curses.color_pair(COLOR_GRAIN))
win.addstr(5, 21, '[{:>2}]'.format(grain))
win.addstr(5, 26, 'W', curses.color_pair(COLOR_WOOL))
win.addstr(5, 27, '[{:>2}]'.format(wool))
attr = curses.A_BOLD | curses.color_pair(COLOR_PLAYER0 + self.self_player)
# Items in player's reserve
win.addstr(6, 2, 'x', attr)
win.addstr(6, 3, '[{}]'.format(player.settlements))
win.addstr(6, 8, 'X', attr)
win.addstr(6, 9, '[{}]'.format(player.cities))
win.addstr(6, 14, '/', attr)
win.addstr(6, 15, '[{:>2}]'.format(player.roads))
# Total victory points
win.addstr(6, 21, 'V', curses.color_pair(COLOR_VICTORY))
win.addstr(6, 22, '({:>2})'.format(total_victory))
# Draw player listing, starting with header
win.addstr(1, x - 17, 'R', curses.color_pair(COLOR_LUMBER))
win.addstr(1, x - 12, 'K', curses.color_pair(COLOR_KNIGHT))
longest_name = max(len(p.name) for p in self.cashan.players)
startx = x - longest_name - 5 - 6 - 3 - 3 - 2
# | | | | `- Space at the end
# | | | `- 'LR '
# | | `- 'LA '
# | `- ' [nn] ' - Knight count
# `- ' [nn]' - Resource count
for i, p in enumerate(self.cashan.players):
win.addstr(2 + i, startx, p.name, curses.color_pair(COLOR_PLAYER0 + i))
n_knights = p.played_development_cards[Knight]
n_res = sum(p.resources.values())
win.addstr(2 + i, x - 18, '[{:>2}]'.format(n_res))
win.addstr(2 + i, x - 13, '[{:>2}]'.format(n_knights))
if LargestArmy in p.special_cards:
win.addstr(2 + i, x - 8, 'LA', curses.color_pair(COLOR_SPECIAL))
if LongestRoad in p.special_cards:
win.addstr(2 + i, x - 5, 'LR', curses.color_pair(COLOR_SPECIAL))
if self.player_turn == i:
win.addstr(2 + i, startx - 2, '*', curses.A_BOLD)
def draw_field(self, y, x):
self.draw_display(y, x)
# Draw cells
for pos, cell in self.cashan.grid.items():
self.draw_cell(y, x, pos, cell)
# Draw roads
for (pos, edge), road in self.cashan.roads.items():
self.draw_road(y, x, pos, edge, road)
# Draw normal-priority state
self.draw_state(y, x, PRI_NORMAL)
# Draw settlements and cities
for (pos, intr), obj in self.cashan.buildings.items():
self.draw_building(y, x, pos, intr, obj)
# Draw high-priority state (e.g. overlay text box)
self.draw_state(y, x, PRI_HIGH)
def draw_state(self, y, x, priority = PRI_NORMAL):
if not self.action_messages and self.states:
state = self.states[-1]
if state.priority == priority and state.accepts_input(self.self_player):
state.draw_state(y, x)
def draw_message(self, y, x):
win = self.stdscr
if self.action_messages:
self.draw_action_message(y, x, self.action_messages[0])
elif self.message:
self.draw_message_lines(y, x, self.message)
else:
if self.states:
state = self.states[-1]
if state.accepts_input(self.self_player):
msg = self.states[-1].display_message
if msg:
self.draw_message_lines(y, x, msg)
def draw_message_lines(self, y, x, lines):
if isinstance(lines, str):
lines = [lines]
for i, msg in enumerate(reversed(lines), 1):
self.stdscr.addstr(y - i, 0, msg[:x], curses.A_BOLD)
def draw_action_message(self, y, x, msg):
player, action, params = msg
w = ScreenWriter(self.stdscr, y, x, y - 2, 0)
p = self.cashan.players
w.write('* ')
if player is not None:
player_color = curses.color_pair(COLOR_PLAYER0 + player)
w.write(p[player].name, curses.A_BOLD | player_color)
w.move_col(1)
if action == 'acquire_resources':
w.write('acquired')
w.write_resources(params)
elif action == 'dev_card':
if player == self.self_player:
card = params
color = dev_color(card)
w.write('received a ')
w.write(card.name, curses.color_pair(color))
w.write(' card')
else:
w.write('bought a Development Card')
elif action == 'discard_resource':
w.write('discarded')
w.write_resources(params)
elif action == 'largest_army':
size, taken = params
if taken is None:
w.write('has achieved ')
w.write('Largest Army', curses.color_pair(COLOR_SPECIAL))
w.write(' with {} knights'.format(size))
else:
w.write('took ')
w.write('Largest Army', curses.color_pair(COLOR_SPECIAL))
w.write(' from ')
w.write(p[taken].name, curses.color_pair(COLOR_PLAYER0 + taken))
w.write(' with {} knights'.format(size))
elif action == 'longest_road':
length, taken = params
if taken is None:
w.write('has achieved ')
w.write('Longest Road', curses.color_pair(COLOR_SPECIAL))
w.write(' with {} continuous roads'.format(length))
else:
w.write('took ')
w.write('Longest Road', curses.color_pair(COLOR_SPECIAL))
w.write(' from ')
w.write(p[taken].name, curses.color_pair(COLOR_PLAYER0 + taken))
w.write(' with {} continuous roads'.format(length))
elif action == 'move_robber':
w.write('moved the ')
w.write('Robber', curses.A_BOLD)
elif action == 'place_building':
item, pos, intr = params
w.write('placed a {}'.format(item))
self.draw_building_at(y, x, pos, intr,
'x' if item is Settlement else 'X',
curses.A_BOLD | curses.A_REVERSE | player_color)
elif action == 'place_road':
pos, edge = params
w.write('placed a road')
self.draw_road_at(y, x, pos, edge,
curses.A_BOLD | curses.A_REVERSE | player_color)
elif action == 'play_card':
card = params
w.write('played ')
w.write(card.name, curses.color_pair(dev_color(card)))
elif action == 'resource_exhausted':
w.write('Nobody receives ')
w.write_resource_name(params)
elif action == 'resource_produced':
w.write('received')
w.write_resources(params)
elif action == 'roll':
n = params
w.write('rolled: ')
w.write(str(n), curses.A_BOLD)
elif action == 'steal':
target, res = params
if self.self_player in (player, target):
w.write('stole 1 ')
_, r_color = resource_name(res)
w.write(res.name, curses.color_pair(r_color))
w.write(' from ')
w.write(p[target].name, curses.color_pair(COLOR_PLAYER0 + target))
else:
w.write('stole from ')
w.write(p[target].name, curses.color_pair(COLOR_PLAYER0 + target))
elif action == 'steal_fail':
target = params
attr = curses.A_BOLD | curses.color_pair(COLOR_PLAYER0 + target)
w.write('could not steal from ')
w.write(p[target].name, attr)
w.write(' because ')
w.write(p[target].name, attr)
w.write(' has no resources')
elif action == 'take_none':
w.write('took no ')
w.write_resource_name(params)
w.write(' because nobody had any')
elif action == 'take_resource':
target, resource, n = params
w.write('took {} '.format(n))
w.write_resource_name(resource)
w.write(' from ')
w.write(p[target].name, curses.color_pair(COLOR_PLAYER0 + target))
elif action == 'trade_bank':
(n_give, r_give), (n_recv, r_recv) = params
w.write('traded {} '.format(n_give))
w.write_resource_name(r_give)
w.write(' for {} '.format(n_recv))
w.write_resource_name(r_recv)
elif action == 'trade_player':
other, n_give, r_give, n_recv, r_recv = params
w.write('traded {} '.format(n_give))
w.write_resource_name(r_give)
w.write(' to ')
attr = curses.A_BOLD | curses.color_pair(COLOR_PLAYER0 + other)
w.write(p[other].name, attr)
w.write(' for {} '.format(n_recv))
w.write_resource_name(r_recv)
elif action == 'withdraw_trade':
mode = params
w.write('withdrew their trade {}'.format(mode))
else:
raise Exception('invalid action: {!r}'.format(action))
w.next_line()
w.write('[Enter]: Next [Space]: Skip', curses.A_BOLD)
def draw_stopped(self, y, x):
raise NotImplementedError
def cell_pos(self, y, x, pos):
'''Returns the origin (y, x) of a cell in screen coordinates'''
# Position coordinates
px, py = pos
# Center coords
cx = x // 2
cy = y // 2
# Grid coords
gx = px
gy = px + py * 2
# Origin coords
ox = cx + gx * 6
oy = cy + gy * 2 # Half height because these are half steps
return oy, ox
def draw_cell(self, y, x, pos, cell):
oy, ox = self.cell_pos(y, x, pos)
win = self.stdscr
if cell.terrain is Sea:
name, color = resource_name(cell.harbor.resource)
ratio = '{}:{}'.format(*cell.harbor.ratio)
win.addstr(oy - 1, ox - 2, name, curses.color_pair(color))
win.addstr(oy + 0, ox - 1, ratio)
else:
win.addstr(oy - 3, ox - 1, '____')
win.addstr(oy - 2, ox - 2, '/ \\')
win.addstr(oy - 1, ox - 3, '/ \\')
win.addstr(oy + 0, ox - 3, '\\ /')
win.addstr(oy + 1, ox - 2, '\\____/')
self.draw_name_at(y, x, pos, cell)
if cell.number is not None:
attr = 0
if cell.number in [6, 8]:
attr |= curses.color_pair(COLOR_HIGH)
win.addstr(oy, ox - 1, '{:>2}'.format(cell.number), attr)
if self.cashan.robber == pos:
win.addstr(oy, ox + 2, 'R', curses.A_BOLD)
def draw_name_at(self, y, x, pos, cell, attr = 0):
oy, ox = self.cell_pos(y, x, pos)
name, color = terrain_name(cell.terrain)
self.stdscr.addstr(oy - 1, ox - 1, name, curses.color_pair(color) | attr)
def draw_road(self, y, x, pos, edge, road):
attr = curses.A_BOLD | curses.color_pair(COLOR_PLAYER0 + road.owner)
self.draw_road_at(y, x, pos, edge, attr)
def draw_road_at(self, y, x, pos, edge, attr = 0):
oy, ox = self.cell_pos(y, x, pos)
win = self.stdscr
if edge == EDGE_N:
win.addstr(oy - 3, ox - 1, '____', attr)
elif edge == EDGE_NE:
win.addstr(oy - 2, ox + 3, '\\', attr)
win.addstr(oy - 1, ox + 4, '\\', attr)
elif edge == EDGE_NW:
win.addstr(oy - 2, ox - 2, '/', attr)
win.addstr(oy - 1, ox - 3, '/', attr)
elif edge == EDGE_S:
win.addstr(oy + 1, ox - 1, '____', attr)
elif edge == EDGE_SE:
win.addstr(oy + 0, ox + 4, '/', attr)
win.addstr(oy + 1, ox + 3, '/', attr)
elif edge == EDGE_SW:
win.addstr(oy + 0, ox - 3, '\\', attr)
win.addstr(oy + 1, ox - 2, '\\', attr)
else:
raise Exception('invalid edge: {!r}'.format(edge))
def draw_building(self, y, x, pos, intr, obj):
attr = curses.A_BOLD | curses.color_pair(COLOR_PLAYER0 + obj.owner)
ch = 'X' if obj.ty is City else 'x'
self.draw_building_at(y, x, pos, intr, ch, attr)
def draw_building_at(self, y, x, pos, intr, ch, attr = 0):
oy, ox = self.cell_pos(y, x, pos)
win = self.stdscr
if intr == INTR_NE:
win.addstr(oy - 2, ox + 2, ch, attr)
elif intr == INTR_NW:
win.addstr(oy - 2, ox - 1, ch, attr)
elif intr == INTR_E:
win.addstr(oy - 1, ox + 5, ch, attr)
elif intr == INTR_SE:
win.addstr(oy + 1, ox + 3, ch, attr)
elif intr == INTR_SW:
win.addstr(oy + 1, ox - 1, ch, attr)
elif intr == INTR_W:
win.addstr(oy - 1, ox - 4, ch, attr)
else:
raise Exception('invalid intersection: {!r}'.format(intr))
class PlayState:
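    '''
    Holds the parameters of gameplay: which players are AI-controlled,
    the current phase, whose turn it is, and the index of the local player.
    '''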
def __init__(self, *, ai_players, self_player, phase = 'setup', player_turn = None):
self.ai_players = ai_players
self.phase = phase
self.player_turn = player_turn
self.self_player = self_player
class ScreenWriter:
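    '''Helper for writing attributed text to a curses window while tracking the cursor position'''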
def __init__(self, win, y, x, start_y, start_x):
self.win = win
self.y = y
self.x = x
self.start_x = start_x
self.cur_y = start_y
self.cur_x = start_x
def write(self, s, attr = 0):
n = len(s)
rem = self.x - self.cur_x
self.win.addstr(self.cur_y, self.cur_x, s, attr)
self.cur_x += min(rem, n)
def write_resource_name(self, r, attr = 0):
_, color = resource_name(r)
self.write(r.name, attr | curses.color_pair(color))
def write_resources(self, res):
first = True
for r, n in res.items():
if n:
if not first:
self.write(',')
first = False
self.write(' {} '.format(n))
self.write_resource_name(r)
def prev_line(self):
self.move_line(-1)
def next_line(self):
self.move_line(1)
def move(self, y, x):
self.cur_y = y
self.cur_x = x
def move_col(self, n):
self.cur_x += n
def move_line(self, n):
self.cur_y += n
self.cur_x = self.start_x
LOG_FILE = None
def log(*args):
if LOG_FILE is not None:
print('{:.2f}'.format(time.monotonic()), *args, file = LOG_FILE)
def ai_driver(game, player, state):
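    '''Drives one AI decision for the given player; returns CONSUME or PASS'''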
while True:
ret = ai_step(game, player, state)
if ret == CONSUME:
return ret
elif ret == PASS:
return ret
else:
raise Exception('ai_step returned {!r}'.format(ret))
def ai_step(game, player, state):
'''
Operates an AI step on the given state.
Returns CONSUME or PASS
'''
if isinstance(state, BuildSettlement):
pos = random.choice(state.positions)
state.select_position(*pos)
return CONSUME
elif isinstance(state, BuildRoad):
pos = random.choice(state.positions)
state.select_position(*pos)
return CONSUME
elif isinstance(state, HalveResources):
req = state.required[player]
res = game.cashan.players[player].resources.copy()
discard = {}
for i in range(req):
max_r = max(res.items(), key = lambda k: k[1])[0]
res[max_r] -= 1
if max_r in discard:
discard[max_r] += 1
else:
discard[max_r] = 1
if state.set_discard(player, discard):
return CONSUME
return PASS
elif isinstance(state, StartTurn):
if not state.rolled:
state.roll()
return PASS
else:
return CONSUME
elif isinstance(state, SelectCell):
if state.action == 'Move robber':
positions = [pos
for pos, cell in game.cashan.grid
if pos != game.cashan.robber and cell.terrain is not Sea]
random.shuffle(positions)
state.select_position(positions[0])
return CONSUME
else:
raise RuntimeError('unrecognized SelectCell.action: {!r}'.format(state.action))
elif isinstance(state, StealFrom):
state.choose_player(random.choice(state.targets))
return CONSUME
elif isinstance(state, TradeOffer):
return PASS
else:
raise RuntimeError('unrecognized state {!r}'.format(state))
def dec(obj, attr):
'''Decrements obj.attr, raising an Exception if the value is <= 0'''
n = getattr(obj, attr)
if n <= 0:
raise Exception('cannot dec {!r}.{}; value is {!r}'.format(obj, attr, n))
setattr(obj, attr, n - 1)
def dec_item(obj, key):
'''Decrements obj[key], raising an Exception if the value is <= 0'''
n = obj[key]
if n <= 0:
raise Exception('cannot dec {!r}[{!r}]; value is {!r}'.format(obj, key, n))
obj[key] = n - 1
def index_of(itr, predicate):
for i, obj in enumerate(itr):
if predicate(obj):
return i
raise ValueError('index not found in list')
# Values returned from State.player_input
CONSUME, PASS, DIE = range(3)
class State:
display_message = None
priority = PRI_NORMAL
def player_input(self, ch):
raise NotImplementedError
def accepts_input(self, player: int) -> bool:
'''Returns whether the given player may interact with the state'''
return player == self.game.player_turn
def back_state(self):
raise NotImplementedError
def draw_state(self, y, x):
pass
class ConfirmState(State):
'''State requiring confirmation to call a function'''
def __init__(self, game, cb, msg):
        self.game = game
        self.cb = cb
self.display_message = msg
def accepts_input(self, player):
return player == self.game.self_player
def player_input(self, ch):
if ch == ord('y'):
self.cb()
return DIE
class BuildSettlement(State):
'''Prompts player to place a settlement in one of a set of positions'''
def __init__(self, game, positions):
self.game = game
self.selected = 0
self.positions = positions
if self.can_cancel():
self.display_message = \
'[Arrows]: Select position [Enter]: Build settlement [Space]: Cancel'
else:
self.display_message = \
'[Arrows]: Select position [Enter]: Build settlement'
def player_input(self, ch):
n_positions = len(self.positions)
if ch == curses.KEY_LEFT or ch == curses.KEY_UP:
self.selected = (self.selected - 1) % n_positions
elif ch == curses.KEY_RIGHT or ch == curses.KEY_DOWN:
self.selected = (self.selected + 1) % n_positions
elif ch == ord('\n'):
self.select_position(*self.positions[self.selected])
return DIE
elif ch == ord(' ') and self.can_cancel():
return DIE
else:
return PASS
return CONSUME
def select_position(self, pos, intr):
self.game.place_settlement(pos, intr)
if self.game.phase.startswith('setup'):
self.game.push_state(BuildRoad(self.game,
[(p, i) for p, i in intr_edges(pos, intr)
if self.game.cashan.edge_exists(p, i)],
no_cost = True))
def can_cancel(self):
return not self.game.phase.startswith('setup')
def draw_state(self, y, x):
pos, intr = self.positions[self.selected]
for i, (pos, intr) in enumerate(self.positions):
attr = curses.A_BOLD | curses.color_pair(COLOR_PLAYER0 + self.game.self_player)
attr |= curses.A_REVERSE if i == self.selected else 0
self.game.draw_building_at(y, x, pos, intr, '?', attr)
class BuildCity(State):
'''Prompts player to upgrade an existing settlement into a city'''
display_message = '[Arrows]: Select position [Enter]: Build city [Space]: Cancel'
priority = PRI_HIGH
def __init__(self, game, positions):
self.game = game
self.selected = 0
self.positions = positions
def player_input(self, ch):
n_positions = len(self.positions)
if ch == curses.KEY_LEFT or ch == curses.KEY_UP:
self.selected = (self.selected - 1) % n_positions
elif ch == curses.KEY_RIGHT or ch == curses.KEY_DOWN:
self.selected = (self.selected + 1) % n_positions
elif ch == ord('\n'):
self.select_position(*self.positions[self.selected])
return DIE
elif ch == ord(' '):
return DIE
else:
return PASS
return CONSUME
def select_position(self, pos, intr):
self.game.place_city(pos, intr)
def draw_state(self, y, x):
pos, intr = self.positions[self.selected]
self.game.draw_building_at(y, x, pos, intr, 'x',
curses.A_BOLD | curses.A_REVERSE |
curses.color_pair(COLOR_PLAYER0 + self.game.self_player))
class BuildRoad(State):
'''Prompts player to place a road in one of a set of positions'''
def __init__(self, game, positions, *, no_cost = False):
self.game = game
self.selected = 0
self.positions = positions
self.no_cost = no_cost
if self.can_cancel():
self.display_message = \
'[Arrows]: Select position [Enter]: Build road [Space]: Cancel'
else:
self.display_message = \
'[Arrows]: Select position [Enter]: Build road'
def player_input(self, ch):
n_positions = len(self.positions)
if ch == curses.KEY_LEFT or ch == curses.KEY_UP:
self.selected = (self.selected - 1) % n_positions
elif ch == curses.KEY_RIGHT or ch == curses.KEY_DOWN:
self.selected = (self.selected + 1) % n_positions
elif ch == ord('\n'):
self.select_position(*self.positions[self.selected])
return DIE
elif ch == ord(' ') and self.can_cancel():
return DIE
else:
return PASS
return CONSUME
def select_position(self, pos, edge):
self.game.place_road(pos, edge, no_cost = self.no_cost)
def can_cancel(self):
return not self.game.phase.startswith('setup')
def draw_state(self, y, x):
pos, edge = self.positions[self.selected]
self.game.draw_road_at(y, x, pos, edge,
curses.A_BOLD | curses.A_REVERSE |
curses.color_pair(COLOR_PLAYER0 + self.game.self_player))
class BuildTwoRoads(State):
display_message = '[Arrows]: Select position [Enter]: Build road'
def __init__(self, game):
self.game = game
self.roads_left = 2
self.state = self.next_state()
def player_input(self, ch):
if self.state is None:
return DIE
if ch == ord(' '):
return PASS
res = self.state.player_input(ch)
if res == DIE:
self.roads_left -= 1
self.state = self.next_state()
return DIE if self.roads_left == 0 else CONSUME
return res
def next_state(self):
if self.roads_left:
places = self.game.road_places()
if not places:
return
            return BuildRoad(self.game, places, no_cost = True)
def draw_state(self, y, x):
return self.state.draw_state(y, x)
class HalveResources(State):
    '''Forces the targeted players to discard half of their resource cards'''
display_message = '[Arrows]: Select resources [Enter]: Discard'
priority = PRI_HIGH
def __init__(self, game, players):
'''
players is a dict: { player index: discards required }
'''
self.game = game
self.required = players
self.discards = {}
if game.self_player in players:
self.ui = SelectResourcesUI('You must discard {n} resources',
players[game.self_player],
game.cashan.players[game.self_player].resources)
def accepts_input(self, player):
selfp = self.game.self_player
# If the human player is part of this, let them go first so that
# AI-generated action messages do not interrupt.
if selfp in self.required and selfp not in self.discards:
return player == selfp
return player in self.required and player not in self.discards
def player_input(self, ch):
if self.ui.player_input(ch) == CONSUME:
return CONSUME
elif ch == ord('\n'):
req = self.required[self.game.self_player]
if sum(self.ui.resources.values()) == req:
if self.set_discard(self.game.self_player, self.ui.resources):
return DIE
return CONSUME
else:
return PASS
return CONSUME
def set_discard(self, player, resources):
'''
        Sets the resources to be discarded by the given player.
        If this is the last player required to discard, all discards are
        applied, the robber is activated, and True is returned.
'''
req = self.required[player]
dis = sum(resources.values())
if req != dis:
raise Exception('set_discard got wrong resource count: '
'expected {}; got {}'.format(req, dis))
self.discards[player] = resources
self.game.player_set_discard(player, resources)
return self.finished()
def finished(self):
if len(self.required) == len(self.discards):
for p, r in self.discards.items():
self.game.player_discard(p, r)
self.game.activate_robber(False)
return True
return False
def draw_state(self, y, x):
self_p = self.game.self_player
if self_p in self.required and self_p not in self.discards:
self.ui.draw(self.game.stdscr, y, x)
class SelectResourcesUI:
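    '''Widget for selecting up to `max` resource cards, bounded by the amounts in `bounds`'''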
def __init__(self, message, max, bounds):
self.message = message
self.max = max
self.bounds = bounds
self.resources = resource_cards(0)
self.selected = 0
def draw(self, win, y, x):
w = 50
h = 10
sub = win.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
sub.addstr(2, 3, self.message.format(n = self.max))
for i, r in enumerate(RESOURCES):
name, color = resource_name(r)
sub.addstr(4, 5 + i * 9, name.strip(), curses.color_pair(color))
sub.addstr(5, 6 + i * 9, '{:>2}'.format(self.resources[r]),
curses.A_REVERSE if self.selected == i else 0)
def get_resource(self, index):
return RESOURCES[index]
def player_input(self, ch):
if ch == curses.KEY_LEFT:
self.selected = (self.selected - 1) % 5
elif ch == curses.KEY_RIGHT:
self.selected = (self.selected + 1) % 5
elif ch == curses.KEY_UP:
r = self.get_resource(self.selected)
if self.bounds[r] != self.resources[r] and \
sum(self.resources.values()) < self.max:
self.resources[r] += 1
elif ch == curses.KEY_DOWN:
r = self.get_resource(self.selected)
if self.resources[r] > 0:
self.resources[r] -= 1
else:
return PASS
return CONSUME
class SelectCell(State):
'''Selects a cell on the map and calls a callback with the result'''
def __init__(self, game, action, callback, deny = None):
self.game = game
self.action = action
self.callback = callback
self.selected = (0, 0)
self.deny = deny
if deny == (0, 0):
self.selected = (0, -1)
self.display_message = '[Arrows]: Select cell [Enter]: {}'.format(action)
def player_input(self, ch):
x, y = self.selected
if ch == curses.KEY_LEFT:
x -= 1
elif ch == curses.KEY_RIGHT:
x += 1
elif ch == curses.KEY_UP:
y -= 1
elif ch == curses.KEY_DOWN:
y += 1
elif ch == ord('\n'):
self.select_position(self.selected)
return DIE
else:
return PASS
if self.deny != (x, y) and self.game.cashan.cell_exists((x, y)):
self.selected = (x, y)
return CONSUME
def select_position(self, pos):
self.callback(pos)
def draw_state(self, y, x):
cell = self.game.cashan.grid[self.selected]
self.game.draw_name_at(y, x, self.selected, cell, curses.A_REVERSE)
class SelectResource(State):
display_message = '[Arrows]: Select resource [Enter]: Take resources'
priority = PRI_HIGH
def __init__(self, game):
self.game = game
self.selected = 0
def player_input(self, ch):
selected = self.selected
if ch == curses.KEY_UP:
self.selected = (selected - 1) % len(RESOURCES)
elif ch == curses.KEY_DOWN:
self.selected = (selected + 1) % len(RESOURCES)
elif ch == ord('\n'):
self.select_resource(RESOURCES[self.selected])
return DIE
else:
return PASS
return CONSUME
def select_resource(self, resource):
self.game.take_all_resource(resource)
def draw_state(self, y, x):
w = 50
h = 15
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
sub.addstr(2, 5, 'Select a resource')
sub.addstr(4 + self.selected, 3, '*')
for i, r in enumerate(RESOURCES):
ScreenWriter(sub, h, w, 4 + i, 5).write_resource_name(r)
class SelectResourceCards(State):
display_message = '[Arrows]: Select resources [Enter]: Acquire resources'
priority = PRI_HIGH
def __init__(self, game):
self.game = game
self.n_resources = n = min(2, sum(game.cashan.resources.values()))
self.ui = SelectResourcesUI('Select {n} resources',
n, game.cashan.resources)
def player_input(self, ch):
if self.ui.player_input(ch) == CONSUME:
return CONSUME
elif ch == ord('\n'):
if sum(self.ui.resources.values()) == self.n_resources:
self.select_resources(self.ui.resources)
return DIE
else:
return PASS
return CONSUME
def select_resources(self, resources):
self.game.acquire_resources(resources)
def draw_state(self, y, x):
self.ui.draw(self.game.stdscr, y, x)
class StartTurn(State):
'''Represents the beginning of a normal turn'''
def __init__(self, game):
self.game = game
self.rolled = False
self.played = False
self.bought = defaultdict(int)
def player_input(self, ch):
if ch == ord('r') and not self.rolled:
self.roll()
elif ch == ord('b'):
self.game.push_state(Buy(self.game, self))
elif ch == ord('t') and self.can_trade():
self.game.push_state(Trade(self.game, self))
elif ch == ord('p') and self.can_play():
self.game.push_state(Play(self.game, self))
elif ch == ord('v') and self.can_declare():
self.game.declare_victory()
elif ch == ord('e') and self.can_end():
return DIE
else:
return PASS
return CONSUME
def roll(self):
self.rolled = True
self.game.player_roll()
def can_buy(self):
player = self.game.current_player
return any(player.can_buy(i) for i in purchasable())
def can_declare(self):
return self.game.cashan.count_victory_points(
self.game.current_player) >= 10
def can_end(self):
return self.rolled
def can_play(self):
return not self.played and any(self.is_playable(card)
for card in self.game.current_player.development_cards.keys())
def is_playable(self, card):
hand = self.game.current_player.development_cards
return card is not VictoryPoint and hand[card] - self.bought[card] > 0
def can_roll(self):
return not self.rolled
def can_trade(self):
return any(self.game.current_player.resources.values())
def buy_item(self, item):
if item is Road:
self.game.push_state(BuildRoad(self.game,
self.game.road_places()))
elif item is Settlement:
self.game.push_state(BuildSettlement(self.game,
self.game.settlement_places()))
elif item is City:
self.game.push_state(BuildCity(self.game,
self.game.city_places()))
elif item is DevelopmentCard:
self.bought[self.game.buy_development_card()] += 1
else:
raise Exception('invalid item: {!r}'.format(item))
def play_card(self, card):
self.played = True
self.game.play_card(card)
def propose_trade(self, mode, n, resource):
'''
Proposes a trade of given mode ('offer' or 'request').
'''
return self.game.propose_trade(mode, n, resource)
def perform_maritime_trade(self, give, recv):
'''Trades give: (n, resource) for recv: (n, resource) with the "bank"'''
self.game.trade_with_bank(give, recv)
@property
def display_message(self):
msg = []
if self.can_roll():
msg.append('[R]oll dice')
if self.can_buy():
msg.append('[B]uy')
else:
# Always show 'Buy' so that players can check costs
msg.append('[B]: Check costs')
if self.can_trade():
msg.append('[T]rade')
if self.can_play():
msg.append('[P]lay')
if self.can_declare():
msg.append('[V]: Declare victory')
if self.can_end():
msg.append('[E]nd turn')
return ' '.join(msg)
class TradeOffer(State):
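    '''
    State for an active trade proposal: other players submit offers,
    which the proposing player may accept or reject.
    '''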
priority = PRI_HIGH
def __init__(self, game, mode, n, resource):
self.game = game
self.mode = mode
self.n = n
self.resource = resource
# [ (player, n, resource), ... ]
self.offers = []
# { player: ( 'offered' | 'rejected' ), ... }
self.states = {}
owner = game.player_turn == game.self_player
self.ui = TradeOwnerUI(self) if owner else TradeOtherUI(self)
@property
def display_message(self):
return self.ui.display_message
def accepts_input(self, player):
return True
def player_input(self, ch):
return self.ui.player_input(ch)
def accept_offer(self, offer):
player, n, resource = offer
if self.mode == 'offer':
self.game.trade_with_player(player, self.n, self.resource, n, resource)
return DIE
else:
if self.game.current_player.resources[resource] < n:
self.game.set_message('Not enough {} to trade'.format(resource.name))
return CONSUME
self.game.trade_with_player(player, n, resource, self.n, self.resource)
return DIE
def reject_offer(self, index = None, *, player = None):
if index is None:
index = index_of(self.offers, lambda off: off[0] == player)
player, _, _ = self.offers.pop(index)
self.states[player] = 'rejected'
self.game.reject_trade_offer(player)
def submit_offer(self, player, n, resource):
self.offers.append((player, n, resource))
self.states[player] = 'offered'
self.game.trade_offer(player, n, resource)
def withdraw_trade(self):
self.game.withdraw_trade(self.mode)
def draw_state(self, y, x):
self.ui.draw(y, x)
class TradeOwnerUI:
def __init__(self, trade):
self.game = trade.game
self.trade = trade
self.selected = None
@property
def display_message(self):
if self.trade.offers:
if self.selected is None:
return ['[Arrows]: Select offer',
'[Space]: Withdraw {}'.format(self.trade.mode)]
else:
return ['[Arrows]: Select offer [Enter]: Accept offer [R]eject offer',
'[Space]: Withdraw {}'.format(self.trade.mode)]
else:
return '[Space]: Withdraw {}'.format(self.trade.mode)
def player_input(self, ch):
selected = self.selected
n_offers = len(self.trade.offers)
if ch == curses.KEY_UP:
if n_offers:
self.selected = (n_offers - 1 if selected is None else
(selected - 1) % n_offers)
elif ch == curses.KEY_DOWN:
if n_offers:
self.selected = (0 if selected is None else
(selected + 1) % n_offers)
elif ch == ord('r'):
if self.selected is not None:
self.trade.reject_offer(self.selected)
self.selected = None
return CONSUME
elif ch == ord(' '):
self.trade.withdraw_trade()
return DIE
elif ch == ord('\n'):
if self.selected is not None:
return self.trade.accept_offer(self.trade.offers[self.selected])
else:
return PASS
return CONSUME
def draw(self, y, x):
w = 50
h = 15
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
mode = self.trade.mode
wr = ScreenWriter(sub, h, w, 2, 10)
wr.write('You {}ed {} '.format(mode, self.trade.n))
wr.write_resource_name(self.trade.resource)
if self.trade.offers:
players = self.game.cashan.players
longest_name = max(len(p.name) for p in players)
sub.addstr(4, 14, 'Select an offer')
if self.selected is not None:
sub.addstr(6 + self.selected, 3, '*')
give = 'give' in self.trade.offers[0]
for i, (p, n, resource) in enumerate(self.trade.offers):
player = players[p]
sub.addstr(6 + i, 5, player.name, curses.color_pair(COLOR_PLAYER0 + p))
wr = ScreenWriter(sub, y, x, 6 + i, 12 + longest_name)
wr.write(' {}s {} '.format(mode, n))
wr.write_resource_name(resource)
else:
sub.addstr(4, 14, 'Waiting for offers...')
class TradeOtherUI:
def __init__(self, trade):
self.game = trade.game
self.trade = trade
self.offer = TradeInput()
@property
def display_message(self):
if self.can_offer():
return '[0-9]: Set number [B|L|O|G|W]: Set resource [Enter]: Offer trade'
def player_input(self, ch):
if ch == ord('\n'):
self.make_offer()
elif ch == curses.KEY_BACKSPACE or ch == ctrl('h'):
self.clear_field()
elif ch in RESOURCE_KEYS:
self.set_field(RESOURCE_KEYS[ch])
elif ch in NUMBER_KEYS:
self.set_field(NUMBER_KEYS[ch])
else:
return PASS
return CONSUME
def can_offer(self):
return self.trade.states.get(self.game.self_player) != 'offered'
def make_offer(self):
n = self.offer.number
r = self.offer.resource
player = self.game.cashan.players[self.game.self_player]
if r is None:
self.game.set_message('Missing trade resource')
elif r is self.trade.resource:
self.game.set_message('Cannot trade same resource')
elif n == 0:
self.game.set_message('Cannot trade zero')
elif self.trade.mode == 'offer' and player.resources[r] < n:
self.game.set_message('Not enough {} to trade'.format(r.name))
else:
self.trade.submit_offer(self.game.self_player, n, r)
def set_field(self, value):
if isinstance(value, int):
n = self.offer.number
if n == 0:
self.offer.number = value
elif n < 10:
self.offer.number = n * 10 + value
else: # isinstance(value, Resource)
self.offer.resource = value
def clear_field(self):
if self.offer.number == 0:
self.offer.resource = None
else:
self.offer.number = 0
def draw(self, y, x):
w = 50
h = 15
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
owner = self.game.player_turn
player = self.game.cashan.players[owner]
mode = self.trade.mode
wr = ScreenWriter(sub, h, w, 2, 5)
wr.write(player.name, curses.color_pair(COLOR_PLAYER0 + owner))
wr.write(' {}s {} '.format(mode, self.trade.n))
wr.write_resource_name(self.trade.resource)
state = self.trade.states.get(self.game.self_player)
attr = 0
if self.can_offer():
attr |= curses.A_REVERSE
wr.move(4, 5)
wr.write(mode.title() + ' ')
wr.write('{:>2} '.format(self.offer.number), attr)
if self.offer.resource is None:
wr.write(' ? ', attr)
else:
wr.write_resource_name(self.offer.resource, attr)
if state == 'offered':
wr.move(6, 10)
wr.write(mode.title() + ' submitted')
elif state == 'rejected':
wr.move(6, 10)
wr.write(mode.title() + ' rejected')
class Buy(State):
priority = PRI_HIGH
def __init__(self, game, turn):
self.game = game
self.turn = turn
self.selected = 0
self.items = purchasable()
def player_input(self, ch):
n_items = len(self.items)
if ch == curses.KEY_UP:
self.selected = (self.selected - 1) % n_items
elif ch == curses.KEY_DOWN:
self.selected = (self.selected + 1) % n_items
elif ch == ord('\n'):
if self.can_buy(self.items[self.selected]):
self.turn.buy_item(self.items[self.selected])
else:
self.game.set_message('Cannot buy that item')
elif ch == ord(' '):
return DIE
else:
return PASS
return CONSUME
@property
def display_message(self):
if self.can_buy(self.items[self.selected]):
return '[Arrows]: Select item [Enter]: Buy item [Space]: Cancel'
else:
return '[Arrows]: Select item [Space]: Cancel'
def can_buy(self, item):
game = self.game
player = game.current_player
if not player.can_buy(item):
return False
if item is Road:
return bool(player.roads and game.road_places())
elif item is Settlement:
return bool(player.settlements and game.settlement_places())
elif item is City:
return bool(player.cities and game.city_places())
elif item is DevelopmentCard:
return bool(game.cashan.development_cards)
else:
raise Exception('invalid item: {!r}'.format(item))
def draw_state(self, y, x):
w = 50
h = 15
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
sub.addstr(2, 14, 'Choose an item to buy')
for i, item in enumerate(self.items):
can_buy = self.can_buy(item)
if i == self.selected:
if can_buy:
sub.addstr(4 + i * 2, 4, '*')
else:
sub.addstr(4 + i * 2, 4, 'x',
curses.A_BOLD | curses.color_pair(COLOR_INVALID))
attr = curses.A_BOLD if can_buy else 0
sub.addstr(4 + i * 2, 6, item.name, attr)
wr = ScreenWriter(sub, h, w, 5 + i * 2, 6)
for n, r in item.cost:
_, color = resource_name(r)
wr.write(' {} '.format(n))
wr.write(r.name, curses.color_pair(color))
class TradeInput:
def __init__(self):
self.number = 0
self.resource = None
def gen_state_id(NEXT_ID = itertools.count()):
# itertools is a builtin module, which means that the GIL cannot be released
# during the call of next(NEXT_ID). Therefore, this is effectively atomic.
return next(NEXT_ID)
RESOURCE_KEYS = {
ord('b'): Brick,
ord('l'): Lumber,
ord('o'): Ore,
ord('g'): Grain,
ord('w'): Wool,
}
NUMBER_KEYS = {
ord('0'): 0,
ord('1'): 1,
ord('2'): 2,
ord('3'): 3,
ord('4'): 4,
ord('5'): 5,
ord('6'): 6,
ord('7'): 7,
ord('8'): 8,
ord('9'): 9,
}
class Trade(State):
priority = PRI_HIGH
def __init__(self, game, turn):
self.game = game
self.turn = turn
self.selected = 0
self.inputs = [TradeInput() for i in range(4)]
@property
def display_message(self):
msg = ['[Arrows]: Select', '[0-9]: Set number',
'[B|L|O|G|W]: Set resource', '[Backspace]: Clear']
msg2 = []
if self.selected >= 2:
msg2.append('[Enter]: Trade')
else:
msg2.append('[Enter]: Propose trade')
msg2.append('[Space]: Cancel')
return [' '.join(msg), ' '.join(msg2)]
def player_input(self, ch):
selected = self.selected
if ch == curses.KEY_LEFT or ch == curses.KEY_RIGHT:
self.selected = { 2: 3, 3: 2 }.get(selected, selected)
elif ch == curses.KEY_UP:
self.selected = { 0: 2, 1: 0, 2: 1, 3: 1 }[selected]
elif ch == curses.KEY_DOWN:
self.selected = { 0: 1, 1: 2, 2: 0, 3: 0 }[selected]
elif ch in RESOURCE_KEYS:
self.set_input(selected, RESOURCE_KEYS[ch])
elif ch in NUMBER_KEYS:
self.set_input(selected, NUMBER_KEYS[ch])
elif ch == ord(' '):
return DIE
elif ch == ord('\n'):
return self.make_trade()
elif ch == curses.KEY_BACKSPACE or ch == ctrl('h'):
self.clear_input(selected)
else:
return PASS
return CONSUME
def clear_input(self, selected):
field = self.inputs[selected]
if field.number == 0:
field.resource = None
else:
field.number = 0
def set_input(self, selected, value):
i = self.inputs[selected]
if isinstance(value, int):
n = i.number
if n == 0:
i.number = value
self.adjust_linked_inputs(selected)
elif n < 10:
i.number = n * 10 + value
self.adjust_linked_inputs(selected)
else: # isinstance(value, Resource)
i.resource = value
self.adjust_linked_inputs(selected)
# Linked inputs for maritime trading
LINKED_INPUTS = { 2: 3, 3: 2 }
def adjust_linked_inputs(self, selected):
linked = self.LINKED_INPUTS.get(selected)
if linked is not None:
m, n = self.get_ratio()
if selected == 2:
amount = self.inputs[2].number
if amount % m == 0:
new_amount = (amount // m) * n
if new_amount < 100:
self.inputs[3].number = new_amount
else:
amount = self.inputs[3].number
if amount % n == 0:
new_amount = (amount // n) * m
if new_amount < 100:
self.inputs[2].number = new_amount
def make_trade(self):
selected = self.selected
if selected == 0: # Offer
n = self.inputs[0].number
r = self.inputs[0].resource
player = self.game.current_player
if r is None:
self.game.set_message('Missing trade resource')
elif n == 0:
self.game.set_message('Cannot trade zero')
elif player.resources[r] < n:
self.game.set_message('Not enough {} to trade'.format(r.name))
else:
self.turn.propose_trade('offer', n, r)
elif selected == 1: # Request
n = self.inputs[1].number
r = self.inputs[1].resource
if r is None:
self.game.set_message('Missing trade resource')
elif n == 0:
self.game.set_message('Cannot trade zero')
else:
self.turn.propose_trade('request', n, r)
else: # Maritime trade
n_give = self.inputs[2].number
r_give = self.inputs[2].resource
n_recv = self.inputs[3].number
r_recv = self.inputs[3].resource
if n_give == 0 or n_recv == 0:
self.game.set_message('Cannot trade zero')
elif r_give is None or r_recv is None:
self.game.set_message('Missing trade resource')
elif r_give is r_recv:
self.game.set_message('Cannot trade for same resource')
else:
player = self.game.current_player
ratio = self.get_ratio()
m, n = ratio
if n_give % m != 0 or n_recv % n != 0:
self.game.set_message('Impossible trade amount')
elif player.resources[r_give] < n_give:
self.game.set_message('Not enough {} to trade'
.format(r_give.name))
elif self.game.cashan.resources[r_recv] < n_recv:
self.game.set_message('Not enough {} to trade'
.format(r_recv.name))
else:
self.turn.perform_maritime_trade(
(n_give, r_give), (n_recv, r_recv))
return DIE
return CONSUME
def get_ratio(self):
r = self.inputs[2].resource
return self.game.cashan.get_trade_ratio(self.game.player_turn, r)
def draw_state(self, y, x):
w = 40
h = 15
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
def attr(n):
return curses.A_REVERSE if n == self.selected else 0
inputs = self.inputs
sub.addstr(2, 12, 'Propose a trade', curses.A_BOLD)
sub.addstr(4, 5, 'Offer')
sub.addstr(4, 13, '{:>2} '.format(inputs[0].number), attr(0))
r = inputs[0].resource
if r is not None:
ScreenWriter(sub, y, x, 4, 16).write_resource_name(r, attr(0))
else:
sub.addstr(4, 16, ' ? ', attr(0))
sub.addstr(6, w // 2 - 1, 'or', curses.A_BOLD)
sub.addstr(8, 5, 'Request')
sub.addstr(8, 13, '{:>2} '.format(inputs[1].number), attr(1))
r = inputs[1].resource
if r is not None:
ScreenWriter(sub, y, x, 8, 16).write_resource_name(r, attr(1))
else:
sub.addstr(8, 16, ' ? ', attr(1))
sub.addstr(10, 12, 'Maritime trade', curses.A_BOLD)
sub.addstr(12, 5, '{:>2} '.format(inputs[2].number), attr(2))
r = inputs[2].resource
if r is not None:
ScreenWriter(sub, y, x, 12, 8).write_resource_name(r, attr(2))
else:
sub.addstr(12, 8, ' ? ', attr(2))
sub.addstr(12, 16, 'for')
sub.addstr(12, 21, '{:>2} '.format(inputs[3].number), attr(3))
r = inputs[3].resource
if r is not None:
ScreenWriter(sub, y, x, 12, 24).write_resource_name(r, attr(3))
else:
sub.addstr(12, 24, ' ? ', attr(3))
sub.addstr(12, 32, '{}:{}'.format(*self.get_ratio()))
class Play(State):
display_message = '[Arrows]: Select [Enter]: Play card [Space]: Cancel'
priority = PRI_HIGH
def __init__(self, game, turn):
self.game = game
self.turn = turn
self.selected = 0
hand = game.current_player.development_cards
self.cards = [card for card in DEVELOPMENT if hand[card]]
def player_input(self, ch):
selected = self.selected
n_cards = len(self.cards)
if ch == curses.KEY_UP:
self.selected = (selected - 1) % n_cards
elif ch == curses.KEY_DOWN:
self.selected = (selected + 1) % n_cards
elif ch == ord(' '):
return DIE
elif ch == ord('\n'):
if self.turn.is_playable(self.cards[self.selected]):
self.turn.play_card(self.cards[self.selected])
return DIE
else:
return PASS
return CONSUME
def draw_state(self, y, x):
w = 60
h = 15
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
sub.addstr(2, 19, 'Choose a card to play')
hand = self.game.current_player.development_cards
if self.turn.is_playable(self.cards[self.selected]):
sub.addstr(4 + self.selected, 3, '*')
else:
sub.addstr(4 + self.selected, 3, 'x',
curses.A_BOLD | curses.color_pair(COLOR_INVALID))
for i, card in enumerate(self.cards):
sub.addstr(4 + i, 5, card.name, curses.color_pair(dev_color(card)))
sub.addstr(4 + i, 25, str(hand[card]))
for i, line in enumerate(self.cards[self.selected].effect.splitlines()):
sub.addstr(10 + i, 5, line)
class StealFrom(State):
    '''Chooses which opponent to steal from'''
    display_message = '[Arrows]: Select player [Enter]: Steal resource'
priority = PRI_HIGH
def __init__(self, game, targets):
self.game = game
self.targets = targets
self.selected = 0
def player_input(self, ch):
n_targets = len(self.targets)
if ch == curses.KEY_UP:
self.selected = (self.selected - 1) % n_targets
elif ch == curses.KEY_DOWN:
self.selected = (self.selected + 1) % n_targets
elif ch == ord('\n'):
self.choose_player(self.targets[self.selected])
return DIE
else:
return PASS
return CONSUME
def choose_player(self, player):
'''
Steals the resource from the player.
NOTE: player argument indicates index into game.players
rather than self.targets
'''
self.game.steal_resource(player)
def draw_state(self, y, x):
w = 50
h = 10
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
sub.addstr(2, 9, 'Choose the target of the robber')
p = self.game.cashan.players
sub.addstr(4 + self.selected, 18, '*')
for i, t in enumerate(self.targets):
sub.addstr(4 + i, 20, p[t].name, curses.color_pair(COLOR_PLAYER0 + t))
class Victory(State):
priority = PRI_HIGH
def __init__(self, game):
self.game = game
def accepts_input(self, player):
return player == self.game.self_player
def player_input(self, ch):
return PASS
def draw_state(self, y, x):
w = 50
h = 10
sub = self.game.stdscr.subwin(h, w, (y - h) // 2, (x - w) // 2)
sub.clear()
sub.border()
players = self.game.cashan.players
placing = self.game.get_placing()
longest_name = max(len(p.name) for p in self.game.cashan.players)
winner = next(iter(placing))
winner_p = players[winner['player']]
textw = len(winner_p.name) + 17
startx = (w - textw) // 2
sub.addstr(2, startx, winner_p.name,
curses.color_pair(COLOR_PLAYER0 + winner['player']))
sub.addstr(2, startx + len(winner_p.name) + 1,
'has won the game!')
for i, p in enumerate(placing):
player_n = p['player']
player = players[player_n]
sub.addstr(4 + i, 5, '{}.'.format(p['place']))
sub.addstr(4 + i, 8, player.name, curses.color_pair(COLOR_PLAYER0 + player_n))
sub.addstr(4 + i, 10 + longest_name, '({:>2})'.format(p['points']))
class WaitRemote(State):
'''Waits for a remote player to finish their turn'''
def __init__(self, game):
self.game = game
self.display_message = 'Waiting for {}\'s turn...'.format(
game.current_player.name)
def accepts_input(self, player):
# All inputs are ignored, but this is required to draw the state.
return player == self.game.self_player
def player_input(self, ch):
return PASS
def dev_color(d):
return {
Knight: COLOR_KNIGHT,
VictoryPoint: COLOR_VICTORY,
}.get(d, COLOR_PROGRESS)
def resource_name(r):
return {
Brick: ('Brick', COLOR_BRICK),
Lumber: ('Lumber', COLOR_LUMBER),
Ore: (' Ore', COLOR_ORE),
Grain: ('Grain', COLOR_GRAIN),
Wool: (' Wool', COLOR_WOOL),
None: (' *', COLOR_ANY),
}[r]
def terrain_name(tr):
'''Returns short name and color for the given terrain type'''
return {
Hills: ('Hill', COLOR_HILLS),
Forest: ('Frst', COLOR_FOREST),
Mountains: ('Mnts', COLOR_MOUNTAINS),
Fields: ('Flds', COLOR_FIELDS),
Pasture: ('Pstr', COLOR_PASTURE),
Desert: ('Dsrt', COLOR_DESERT),
}[tr]
def get_config(args):
name = input('What is your name? ')
config = { 'name': name }
save_config(args, config)
return config
def config_path(file = None):
if sys.platform.startswith('win'):
path = os.path.expanduser('~/AppData/Cashan')
else:
path = os.path.expanduser('~/.config/cashan')
if file:
return os.path.join(path, file)
return path
def load_config(args):
with open(args.config, 'r') as f:
return json.load(f)
def save_config(args, config):
dirname = os.path.dirname(args.config)
if dirname:
os.makedirs(dirname, exist_ok = True)
with open(args.config, 'w') as f:
json.dump(config, f)
f.write('\n')
print('Configuration saved to', args.config)
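# The saved file is plain JSON; illustrative contents only (these keys are the
# ones written by this script, the values are made up):
#   {"name": "Player1", "server": "example.org:7979"}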
def resume_game(name, players):
with open(name, 'r') as f:
state = json.load(f)
game = Cashan.load_state(state.get('game', dict))
phase = state.get('phase')
player_turn = state.get('player_turn', object)
names = [p.name for p in game.players]
if set(names) != set(players):
raise Exception('player names do not match saved game: '
'{!r} in this game; expected {!r}'.format(players, names))
return game, phase, player_turn
def save_game(name, game):
os.makedirs(config_path('games'), exist_ok = True)
name = config_path('games/{}.json'.format(name))
with open(name, 'w') as f:
json.dump({
'game': game.cashan.dump_state(),
'phase': game.phase,
'player_turn': game.player_turn,
'version': VERSION,
}, f)
f.write('\n')
def options():
args = argparse.ArgumentParser(usage = '%(prog)s [OPTIONS]')
args.add_argument('-c', '--config', action = 'store',
metavar = 'FILE', help = 'Path to configuration file')
args.add_argument('--host', action = 'store',
metavar = 'NAME', help = 'Host a game')
args.add_argument('--join', action = 'store',
metavar = 'NAME', help = 'Join an existing game')
args.add_argument('-l', '--log', metavar = 'FILE',
help = 'Log debug messages to FILE')
args.add_argument('--resume', action = 'store',
help = 'Resume a saved game')
args.add_argument('--save', action = 'store_true',
help = 'Save game state at the beginning of each round')
args.add_argument('--no-save', action = 'store_true',
help = 'Do not save game state')
args.add_argument('-p', '--players', type = int, action = 'store',
metavar = 'N', help = 'Number of players in multiplayer', default = 4)
args.add_argument('--server', action = 'store',
metavar = 'HOST:PORT', help = 'Address of multiplayer server')
args.add_argument('-V', '--version',
action = 'version', version = 'cashan ' + VERSION)
return args
AI_NAMES = ['Alice', 'Bob', 'Eve', 'Mallory']
def fill_names(players, n):
'''
Adds n AI names to the list of players and returns the shuffled list of names
'''
names = list(set(AI_NAMES) - set(players))
random.shuffle(names)
players.extend(names[:n])
random.shuffle(players)
return players
def parse_address(addr):
addr, sep, port = addr.rpartition(':')
if not sep:
raise ValueError('invalid address')
if addr.startswith('[') and addr.endswith(']'):
addr = addr[1:-1]
return (addr, int(port))
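# Examples of the accepted forms (illustrative values):
#   parse_address('example.org:7979')  -> ('example.org', 7979)
#   parse_address('[::1]:7979')        -> ('::1', 7979)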
def auth_server(args, config):
if args.server:
server = args.server
write_config = True
elif 'server' in config:
server = config['server']
write_config = False
else:
server = input('Enter host:port of server: ')
write_config = True
address = parse_address(server)
if write_config:
config['server'] = server
save_config(args, config)
sock = socket.socket()
sock.connect(address)
conn = Connection(sock)
conn.write_message({
'action': 'hello',
'name': config['name'],
'version': VERSION,
})
msg = conn.recv_message()
if msg is None:
raise ClientError('connection closed')
action = msg.get('action')
if action == 'error':
raise ClientError('got error from server: {}'.format(msg.get('error')))
if action != 'hello':
raise ClientError('unexpected message from server: {!r}', msg)
# Successful authentication; return connection
return conn
def wait_for_game_start(conn):
while True:
msg = conn.recv_message()
if msg is None:
raise ClientError('connection closed')
action = msg.get('action')
if action == 'host':
print('Hosting game', msg.get('name'))
print('Waiting for more players...')
elif action == 'error':
raise ClientError(msg.get('error'))
elif action == 'join_game':
print('Joined game', msg.get('name'))
print('Waiting for more players...')
elif action == 'join':
print(msg.get('name'), 'joined the game')
elif action == 'leave':
print(msg.get('name'), 'left the game')
elif action == 'ping':
conn.write_message({ 'action': 'pong' })
elif action == 'start':
return msg
else:
raise ClientError('unexpected message: {!r}'.format(msg))
def make_save_name():
return time.strftime('game %Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
args = options().parse_args()
if args.config is None:
args.config = config_path('cashan.cfg')
if args.log:
LOG_FILE = open(args.log, 'w')
if args.players < 2 or args.players > 4:
raise Exception('number of players must be between 2 and 4')
if args.host and args.join:
raise Exception('--host and --join are mutually exclusive')
if args.join and args.resume:
raise Exception('--join and --resume are incompatible')
try:
config = load_config(args)
except FileNotFoundError:
config = get_config(args)
if args.host or args.join:
conn = auth_server(args, config)
if args.host:
conn.write_message({ 'action': 'host', 'name': args.host,
'players': args.players })
elif args.join:
conn.write_message({ 'action': 'join', 'name': args.join })
msg = wait_for_game_start(conn)
if 'players' in msg:
players = msg.get('players', list)
if args.resume:
game, phase, player_turn = resume_game(args.resume, players)
else:
random.shuffle(players)
game = Cashan(random_grid(), players)
phase = 'setup'
player_turn = None
conn.write_message({ 'action': 'send', 'body':
{ 'action': 'start', 'state': {
'game': game.dump_state(),
'phase': phase,
'player_turn': player_turn,
} } })
elif 'state' in msg:
state = msg.get('state', dict)
game = Cashan.load_state(state.get('game', dict))
phase = state.get('phase')
player_turn = state.get('player_turn', object)
else:
raise ClientError('unexpected message: {!r}'.format(msg))
play_state = PlayState(
ai_players = [],
phase = phase,
player_turn = player_turn,
self_player = index_of(game.players,
lambda p: p.name == config['name']))
else:
conn = None
names = fill_names([config['name']], args.players - 1)
ai_players = [0, 1, 2, 3]
self_player = names.index(config['name'])
ai_players.pop(self_player)
game = Cashan(random_grid(), names)
play_state = PlayState(
self_player = self_player,
ai_players = ai_players)
if args.save or (args.host and not args.no_save):
args.save_name = args.host or args.join or make_save_name()
else:
args.save_name = None
try:
main(CashanGame, args, config, game, play_state, conn)
finally:
if LOG_FILE is not None:
LOG_FILE.flush()
| mit | 587,392,067,179,884,900 | 33.523097 | 91 | 0.542097 | false |
pombreda/py2neo | py2neo/error/server.py | 1 | 3846 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Server error hierarchy
+ NeoError
|
+--- BadRequest (HTTP 400)
|
+--- NotFound (HTTP 404)
|
+--- Conflict (HTTP 409)
|
+--- BatchError (wraps other NeoError from batch submission)
|
+--- CypherError (returned by classic cypher calls)
|
+--- ClientError
|
+--- DatabaseError
|
+--- TransientError
"""
from __future__ import unicode_literals
# class GraphError(Exception):
# """ Default exception class for all errors returned by the
# Neo4j server. See also `CypherError` subclass and `BatchError`
# wrapper class which contain additional qualifying information.
# """
#
# @classmethod
# def hydrate(cls, data):
# static_error_classes = {
# "org.neo4j.cypher.SyntaxException": statement.InvalidSyntax,
# "org.neo4j.cypher.UniquePathNotUniqueException": statement.ConstraintViolation,
# "org.neo4j.graphdb.ConstraintViolationException": statement.ConstraintViolation,
# "SyntaxException": statement.InvalidSyntax,
# "UniquePathNotUniqueException": statement.ConstraintViolation,
# "NotFoundException": statement.EntityNotFound,
# "org.neo4j.graphdb.NotFoundException": statement.EntityNotFound,
# }
# full_name = data.get("fullname")
# if full_name is None:
# full_name = data.get("exception")
# try:
# error_cls = static_error_classes[full_name]
# except KeyError:
# try:
# exception = data["exception"]
# try:
# error_cls = type(exception, (cls,), {})
# except TypeError:
# # for Python 2.x
# error_cls = type(str(exception), (cls,), {})
# except KeyError:
# error_cls = cls
# message = data.pop("message", None)
# return error_cls(message, **data)
#
# def __init__(self, message, **kwargs):
# Exception.__init__(self, message)
# self.message = message
# self.exception = kwargs.get("exception")
# self.full_name = kwargs.get("fullname")
# self.request = kwargs.get("request")
# self.response = kwargs.get("response")
# self.stack_trace = kwargs.get("stacktrace")
# try:
# self.cause = self.hydrate(kwargs["cause"])
# except Exception:
# self.cause = None
class GraphError(Exception):
""" Default exception class for all errors returned by the
Neo4j server.
"""
__cause__ = None
exception = None
fullname = None
request = None
response = None
stacktrace = None
def __new__(cls, *args, **kwargs):
try:
exception = kwargs["exception"]
try:
error_cls = type(exception, (cls,), {})
except TypeError:
# for Python 2.x
error_cls = type(str(exception), (cls,), {})
except KeyError:
error_cls = cls
return Exception.__new__(error_cls, *args)
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args)
for key, value in kwargs.items():
setattr(self, key, value)
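# Illustrative behaviour of the dynamic subclassing above (example only, not
# part of the module): the "exception" keyword names the generated subclass.
#   err = GraphError("node not found", exception="NotFoundException")
#   isinstance(err, GraphError)   # True
#   type(err).__name__            # 'NotFoundException'
#   err.exception                 # 'NotFoundException'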
| apache-2.0 | -7,849,616,567,500,276,000 | 30.268293 | 94 | 0.594904 | false |
agoose77/hivesystem | manual/movingpanda/panda-11b.py | 1 | 6279 | import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import dragonfly.time
import dragonfly.event
import dragonfly.sys
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
a = Spyder.AxisSystem()
a *= 0.005
mypanda = Spyder.Actor3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
entityname="mypanda")
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
camcenter = Spyder.Entity3D(
"camcenter",
(
Spyder.NewMaterial("white", color=(255, 255, 255)),
Spyder.Block3D((1, 1, 1), material="white"),
)
)
del a, box
class pandawalkhive(bee.inithive):
animation = dragonfly.scene.bound.animation()
walk = dragonfly.std.variable("str")("walk")
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
setPos = dragonfly.scene.bound.setPos()
setHpr = dragonfly.scene.bound.setHpr()
interval = dragonfly.time.interval_time(18)
connect(key_w, interval.start)
connect(key_s, interval.pause)
sequence = dragonfly.time.sequence(4)(8, 1, 8, 1)
connect(interval.value, sequence.inp)
ip1 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0))
connect(sequence.outp1, ip1)
connect(ip1, setPos)
connect(key_w, ip1.start)
connect(key_s, ip1.stop)
ip2 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (180, 0, 0))
connect(sequence.outp2, ip2)
connect(ip2, setHpr)
connect(key_w, ip2.start)
connect(key_s, ip2.stop)
ip3 = dragonfly.time.interpolation("Coordinate")((0, -10, 0), (0, 0, 0))
connect(sequence.outp3, ip3)
connect(ip3, setPos)
connect(key_w, ip3.start)
connect(key_s, ip3.stop)
ip4 = dragonfly.time.interpolation("Coordinate")((180, 0, 0), (0, 0, 0))
connect(sequence.outp4, ip4)
connect(ip4, setHpr)
connect(key_w, ip4.start)
connect(key_s, ip4.stop)
connect(ip4.reach_end, interval.start)
from bee.staticbind import staticbind_baseclass
class pandawalkbind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = pandawalkhive
class camerabindhive(bee.inithive):
interval = dragonfly.time.interval_time(30)
sequence = dragonfly.time.sequence(2)(1, 1)
connect(interval.value, sequence.inp)
startsensor = dragonfly.sys.startsensor()
ip1 = dragonfly.time.interpolation("Coordinate")((180, -20, 0), (360, -20, 0))
ip2 = dragonfly.time.interpolation("Coordinate")((0, -20, 0), (180, -20, 0))
connect(sequence.outp1, ip1.inp)
connect(sequence.outp2, ip2.inp)
connect(startsensor, interval.start)
connect(startsensor, ip1.start)
connect(ip1.reach_end, ip1.stop)
connect(ip1.reach_end, ip2.start)
connect(ip2.reach_end, ip2.stop)
connect(ip2.reach_end, ip1.start)
connect(ip2.reach_end, interval.start)
sethpr = dragonfly.scene.bound.setHpr()
connect(ip1, sethpr)
connect(ip2, sethpr)
class camerabind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
z_pandawalk = pandawalkbind().worker()
pandaid = dragonfly.std.variable("id")(pandaname_)
connect(pandaid, z_pandawalk.bindname)
camerabind = camerabind().worker()
camcenter = dragonfly.std.variable("id")("camcenter")
connect(camcenter, camerabind.bindname)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, do_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
wininit = bee.init("window")
wininit.camera.setPos(0, -20, 3)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
| bsd-2-clause | -8,391,680,054,532,955,000 | 27.540909 | 109 | 0.644529 | false |
InteractiveComputerGraphics/SPlisHSPlasH | Scripts/Paraview/partio_extension/setup.py | 1 | 3879 | from setuptools import setup
# Available at setup time due to pyproject.toml
from pybind11.setup_helpers import Pybind11Extension, build_ext
from pybind11 import get_cmake_dir
import sys
import platform
__version__ = "0.0.1"
# The main interface is through Pybind11Extension.
# * You can add cxx_std=11/14/17, and then build_ext can be removed.
# * You can set include_pybind11=false to add the include directory yourself,
# say from a submodule.
#
# Note:
# Sort input source files if you glob sources to ensure bit-for-bit
# reproducible builds (https://github.com/pybind/python_example/pull/53)
defs = []
cxx_args = []
defs.append(('PARTIO_USE_ZLIB', None))
if platform.system() == 'Windows':
defs.append(('PARTIO_WIN32', None))
defs.append(('_USE_MATH_DEFINES', None))
elif platform.system() == 'Linux':
cxx_args = ["-fPIC", "-w"]
ext_modules = [
Pybind11Extension("partio",
["partio_bindings.cpp"] +
[
'../../../extern/partio/src/lib/core/Particle.cpp',
'../../../extern/partio/src/lib/core/ParticleCaching.cpp',
'../../../extern/partio/src/lib/core/ParticleHeaders.cpp',
'../../../extern/partio/src/lib/core/ParticleSimple.cpp',
'../../../extern/partio/src/lib/core/ParticleSimpleInterleave.cpp',
'../../../extern/partio/src/lib/io/BGEO.cpp',
'../../../extern/partio/src/lib/io/BIN.cpp',
'../../../extern/partio/src/lib/io/GEO.cpp',
'../../../extern/partio/src/lib/io/MC.cpp',
'../../../extern/partio/src/lib/io/ParticleIO.cpp',
'../../../extern/partio/src/lib/io/PDA.cpp',
'../../../extern/partio/src/lib/io/PDB.cpp',
'../../../extern/partio/src/lib/io/PDC.cpp',
'../../../extern/partio/src/lib/io/PRT.cpp',
'../../../extern/partio/src/lib/io/PTC.cpp',
'../../../extern/partio/src/lib/io/PTS.cpp',
'../../../extern/partio/src/lib/io/RIB.cpp',
'../../../extern/partio/src/lib/io/ZIP.cpp',
'../../../extern/zlib/src/adler32.c',
'../../../extern/zlib/src/compress.c',
'../../../extern/zlib/src/crc32.c',
'../../../extern/zlib/src/deflate.c',
'../../../extern/zlib/src/gzio.c',
'../../../extern/zlib/src/infback.c',
'../../../extern/zlib/src/inffast.c',
'../../../extern/zlib/src/inflate.c',
'../../../extern/zlib/src/inftrees.c',
'../../../extern/zlib/src/trees.c',
'../../../extern/zlib/src/uncompr.c',
'../../../extern/zlib/src/zutil.c'
],
include_dirs=['../../../extern/partio/src/lib', '../../../extern/zlib/src'],
# Example: passing in the version to the compiled code
                      define_macros=[('VERSION_INFO', __version__)] + defs,
                      extra_compile_args=cxx_args,
                      cxx_std=14
),
]
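# Typical build/installation commands for a pybind11 extension like this one
# (illustrative; run from the directory containing this setup.py):
#   python setup.py build_ext --inplace
#   # or
#   pip install .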
setup(
name="partio",
version=__version__,
author="Stefan Jeske",
author_email="[email protected]",
description="Python Bindings for Partio using pybind11",
long_description="",
ext_modules=ext_modules,
# Currently, build_ext only provides an optional "highest supported C++
# level" feature, but in the future it may provide more features.
cmdclass={"build_ext": build_ext},
zip_safe=False,
) | mit | 5,408,965,410,109,457,000 | 43.597701 | 98 | 0.484403 | false |
7yl4r/eventsEngine | EventsEngine/Event.py | 1 | 1041 | '''
A base event class.
Subclasses should implement abstract methods "trigger" and "action".
'''
class Event(object):
    def __init__(self, trigger=None, action=None):
        # Only bind trigger/action when callables are supplied; a default of
        # None would otherwise shadow the methods defined by a subclass.
        if trigger is not None:
            self.trigger = trigger
        if action is not None:
            self.action = action
        self.initiated = False
        self.completed = False
# currently set all events to start by default for testing
self.event_start()
def check(self):
''' checks the event for trigger condition satisfied '''
if self.trigger():
self.action()
return True
# else do nothing
def event_start(self):
self.initiated = True
def event_end(self):
self.completed = True
def get_event_name(self):
return self.trigger.function.__name__
def trigger(self):
raise NotImplementedError('Abstract method "trigger" should be implemented by subclass')
def action(self):
raise NotImplementedError('Abstract method "action" should be implemented by subclass') | gpl-2.0 | 8,409,640,018,674,820,000 | 27.162162 | 96 | 0.612872 | false |
youen/djity | djity/utils/inherit.py | 1 | 1641 | """
This module uses the django snippets 'Model inheritance with content type and
inheritance aware manager' (http://djangosnippets.org/snippets/1034/).
Using this module, instances of a model class and its subclasses can be accessed by the objects manager of the super class.
Usage:
from django.db import models
from django.contrib.contenttypes.models import ContentType
from djity.utils import SuperManager
class SuperClass(models.Model):
content_type = models.ForeignKey(ContentType,editable=False,null=True)
objects = SuperManager()
# other fields and methods...
def save(self,*args,**kwargs):
if(not self.content_type):
self.content_type = ContentType.objects.get_for_model(self.__class__)
super(SuperClass,self).save(*args,**kwargs)
def as_leaf_class(self):
content_type = self.content_type
model = content_type.model_class()
if (model == SuperClass):
return self
return model.objects.get(id=self.id)
class SubClass(SuperClass):
objects = SuperManager()
"""
from django.db import models
from django.db.models.query import QuerySet
class SubclassingQuerySet(QuerySet):
def __getitem__(self,k):
result = super(SubclassingQuerySet, self).__getitem__(k)
if isinstance(result,models.Model):
return result.as_leaf_class()
else:
return result
def __iter__(self):
for item in super(SubclassingQuerySet,self).__iter__():
yield item.as_leaf_class()
class SuperManager(models.Manager):
def get_query_set(self):
return SubclassingQuerySet(self.model)
| gpl-3.0 | 4,219,098,135,859,925,000 | 29.962264 | 123 | 0.686167 | false |
mredar/markupsafe | markupsafe/__init__.py | 1 | 10369 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import string
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
r"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
of frameworks and web applications use. :class:`Markup` is a direct
subclass of `unicode` and provides all the methods of `unicode` just that
it escapes arguments passed and always returns `Markup`.
The `escape` function returns markup objects so that double escaping can't
happen.
The constructor of the :class:`Markup` class can be used for three
different things: When passed an unicode object it's assumed to be safe,
when passed an object with an HTML representation (has an `__html__`
method) that representation is used, otherwise the object passed is
converted into a unicode string and then assumed to be safe:
>>> Markup("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
>>> class Foo(object):
... def __html__(self):
... return '<a href="#">foo</a>'
...
>>> Markup(Foo())
Markup(u'<a href="#">foo</a>')
If you want object passed being always treated as unsafe you can use the
:meth:`escape` classmethod to create a :class:`Markup` object:
>>> Markup.escape("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
Operations on a markup string are markup aware which means that all
arguments are passed through the :func:`escape` function:
>>> em = Markup("<em>%s</em>")
>>> em % "foo & bar"
Markup(u'<em>foo & bar</em>')
>>> strong = Markup("<strong>%(text)s</strong>")
>>> strong % {'text': '<blink>hacker here</blink>'}
Markup(u'<strong><blink>hacker here</blink></strong>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup(u'<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(cls, base=u'', encoding=None, errors='strict'):
if hasattr(base, '__html__'):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
return text_type.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if isinstance(other, string_types) or hasattr(other, '__html__'):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
text_type.__repr__(self)
)
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return list(map(self.__class__, text_type.splitlines(
self, *args, **kwargs)))
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
r"""Unescape markup again into an text_type string. This also resolves
known HTML4 and XHTML entities:
>>> Markup("Main » <em>About</em>").unescape()
u'Main \xbb <em>About</em>'
"""
from markupsafe._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
            return u''.join(('&', name, ';'))
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
r"""Unescape markup into an text_type string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
>>> Markup("Main » <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape the string. Works like :func:`escape` with the difference
that for subclasses of :class:`Markup` this function would return the
correct subclass.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_simple_escaping_wrapper(name):
orig = getattr(text_type, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
_escape_argspec(kwargs, iteritems(kwargs), self.escape)
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in '__getitem__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
'translate', 'expandtabs', 'swapcase', 'zfill':
locals()[method] = make_simple_escaping_wrapper(method)
# new in python 2.5
if hasattr(text_type, 'partition'):
def partition(self, sep):
return tuple(map(self.__class__,
text_type.partition(self, self.escape(sep))))
def rpartition(self, sep):
return tuple(map(self.__class__,
text_type.rpartition(self, self.escape(sep))))
# new in python 2.6
if hasattr(text_type, 'format'):
def format(*args, **kwargs):
self, args = args[0], args[1:]
formatter = EscapeFormatter(self.escape)
kwargs = _MagicFormatMapping(args, kwargs)
return self.__class__(formatter.vformat(self, args, kwargs))
def __html_format__(self, format_spec):
if format_spec:
raise ValueError('Unsupported format specification '
'for Markup.')
return self
# not in python 3
if hasattr(text_type, '__getslice__'):
__getslice__ = make_simple_escaping_wrapper('__getslice__')
del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See http://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == '':
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
if hasattr(text_type, 'format'):
class EscapeFormatter(string.Formatter):
def __init__(self, escape):
self.escape = escape
def format_field(self, value, format_spec):
if hasattr(value, '__html_format__'):
rv = value.__html_format__(format_spec)
elif hasattr(value, '__html__'):
if format_spec:
raise ValueError('No format specification allowed '
'when formatting an object with '
'its __html__ method.')
rv = value.__html__()
else:
rv = string.Formatter.format_field(self, value, format_spec)
return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
__repr__ = lambda s: str(s.escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append('soft_str')
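# Illustrative behaviour of the helpers imported above (examples only):
#   escape('<b>"x"</b>')   -> Markup(u'&lt;b&gt;&#34;x&#34;&lt;/b&gt;')
#   escape_silent(None)    -> Markup(u'')
#   soft_unicode(42)       -> u'42'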
| bsd-3-clause | 5,793,642,374,991,073,000 | 33.795302 | 82 | 0.568618 | false |
WeirdCarrotMonster/forest | forest/components/database.py | 1 | 2987 | # coding=utf-8
"""Модуль описывает функции получения синхронного и асинхронного подключения к базе."""
import motor
import pymongo
# pylint: disable=R0913,W0613
def get_connection_async(
host="127.0.0.1",
port=27017,
database="trunk",
user="admin",
password="password",
replica=None,
**kwargs
):
"""Возвращает асинхронное подключение к базе.
:param host: Хост, к которому выполняется подключение
:type host: str
:param port: Порт базы данных
:type port: int
:param user: Имя пользователя базы данных
:type user: str
:param password: Пароль пользователя базы данных
:type password: str
:param replica: Название replicaSet (при наличии)
:type replica: str
:param database: Имя базы данных
:type database: str
"""
if not replica:
con = motor.MotorClient(
"mongodb://{}:{}@{}:{}/{}".format(
user, password,
host, port,
database
))
else:
con = motor.MotorReplicaSetClient(
"mongodb://{}:{}@{}:{}/{}".format(
user, password,
host, port,
database
),
replicaSet=replica,
connectTimeoutMS=1500,
socketTimeoutMS=1500
)
return con[database]
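# Illustrative usage (assumes a reachable MongoDB with these credentials; the
# collection name is made up):
#   db = get_connection_async(host="127.0.0.1", port=27017, database="trunk",
#                             user="admin", password="password")
#   # inside a tornado coroutine: doc = yield db.branches.find_one({})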
def get_connection(
host="127.0.0.1",
port=27017,
database="trunk",
user="admin",
password="password",
replica=None,
**kwargs
):
"""Возвращает синхронное подключение к базе.
:param host: Хост, к которому выполняется подключение
:type host: str
:param port: Порт базы данных
:type port: int
:param user: Имя пользователя базы данных
:type user: str
:param password: Пароль пользователя базы данных
:type password: str
:param replica: Название replicaSet (при наличии)
:type replica: str
:param database: Имя базы данных
:type database: str
"""
if not replica:
con = pymongo.MongoClient(
"mongodb://{}:{}@{}:{}/{}".format(
user, password,
host, port,
database
))
else:
con = pymongo.MongoReplicaSetClient(
"mongodb://{}:{}@{}:{}/{}".format(
user, password,
host, port,
database
),
replicaSet=replica,
connectTimeoutMS=1500,
socketTimeoutMS=1500
)
return con[database]
| lgpl-3.0 | -2,785,771,197,535,213,600 | 25.295918 | 87 | 0.542491 | false |