| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M) |
---|---|---|---|---|---|
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -llib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = "$Id$"
import os, sys, re
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
def _darwin_compiler_fixup(compiler_so, cc_args):
"""
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one of them in extra_compile_flags.
    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore, GCC will
    barf if multiple '-isysroot' arguments are present.
"""
    stripArch = stripSysroot = False
compiler_so = list(compiler_so)
kernel_version = os.uname()[2] # 8.4.3
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
        # OS X before 10.4.0: these tools don't support -arch and
        # -isysroot at all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = '-isysroot' in cc_args
if stripArch or 'ARCHFLAGS' in os.environ:
        while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
try:
index = compiler_so.index('-isysroot')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
pass
    # Check if the SDK that is used during compilation actually exists;
    # the universal build requires the use of a universal SDK and not all
    # users have that installed by default.
sysroot = None
if '-isysroot' in cc_args:
idx = cc_args.index('-isysroot')
sysroot = cc_args[idx+1]
elif '-isysroot' in compiler_so:
idx = compiler_so.index('-isysroot')
sysroot = compiler_so[idx+1]
if sysroot and not os.path.isdir(sysroot):
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
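# Editor's note: a sketch of the fixup on hypothetical argument lists
# (assumes an OS X 10.4+ kernel, where only user-specified flags trigger
# stripping). If the user passes '-arch' themselves, the copy baked into
# compiler_so is removed so the user's choice wins:
#
#   _darwin_compiler_fixup(['cc', '-arch', 'ppc', '-O2'], ['-arch', 'i386'])
#   # -> ['cc', '-O2']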
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["cc"],
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
executables['ranlib'] = ["ranlib"]
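    # Editor's note: a hypothetical example of how an outsider overrides
    # these defaults, using CCompiler.set_executables() from the base class
    # (the values shown are illustrative, not real sysconfig output):
    #
    #   compiler = UnixCCompiler()
    #   compiler.set_executables(compiler_so='gcc -fPIC -O2',
    #                            linker_so='gcc -shared')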
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source,
output_file=None, macros=None, include_dirs=None,
extra_preargs=None, extra_postargs=None):
ignore, macros, include_dirs = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
raise CompileError, msg
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _darwin_compiler_fixup(compiler_so, cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
            # Not many Unices require ranlib anymore -- SunOS 4.x is, I
            # think, the only major Unix that does. Maybe we need some
            # platform intelligence here to skip ranlib if it's not
            # needed -- or maybe Python's configure script took care of
            # it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if type(output_dir) not in (StringType, NoneType):
raise TypeError, "'output_dir' must be a string or None"
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is needed on OSX. Note: this assumes that the
# normal and C++ compiler have the same environment
# settings.
i = 0
if os.path.basename(linker[0]) == "env":
i = 1
while '=' in linker[i]:
i = i + 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _darwin_compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function in
    # ccompiler.py.
def library_dir_option(self, dir):
return "-L" + dir
def _is_gcc(self, compiler_name):
return "gcc" in compiler_name or "g++" in compiler_name
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
# http://sourceforge.net/tracker/index.php
# ?func=detail&aid=445902&group_id=5470&atid=105470
# Linkers on different platforms need different options to
# specify that directories need to be added to the list of
# directories searched for dependencies when a dynamic library
# is sought. GCC has to be told to pass the -R option through
# to the linker, whereas other compilers just know this.
# Other compilers may need something slightly different. At
# this time, there's no way to determine this information from
# the configuration data stored in the Python installation, so
# we use this hack.
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if sys.platform[:6] == "darwin":
# MacOSX's linker doesn't understand the -R flag at all
return "-L" + dir
elif sys.platform[:5] == "hp-ux":
if self._is_gcc(compiler):
return ["-Wl,+s", "-L" + dir]
return ["+s", "-L" + dir]
elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
return ["-rpath", dir]
elif self._is_gcc(compiler):
return "-Wl,-R" + dir
else:
return "-R" + dir
def library_option(self, lib):
return "-l" + lib
def find_library_file(self, dirs, lib, debug=0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
if sys.platform == 'darwin':
# On OSX users can specify an alternate SDK using
# '-isysroot', calculate the SDK root if it is specified
# (and use it further on)
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
if sys.platform == 'darwin' and (
dir.startswith('/System/') or (
dir.startswith('/usr/') and not dir.startswith('/usr/pkg/'))):
shared = os.path.join(sysroot, dir[1:], shared_f)
dylib = os.path.join(sysroot, dir[1:], dylib_f)
static = os.path.join(sysroot, dir[1:], static_f)
# We're second-guessing the linker here, with not much hard
# data to go on: GCC seems to prefer the shared library, so I'm
# assuming that *all* Unix C compilers do. And of course I'm
# ignoring even GCC's "-static" option. So sue me.
if os.path.exists(dylib):
return dylib
elif os.path.exists(shared):
return shared
elif os.path.exists(static):
return static
# Oops, didn't find it in *any* of 'dirs'
return None
| Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86 | usr/pkg/lib/python2.7/distutils/unixccompiler.py | Python | mit | 14,430 |
# -*- coding: utf-8 -*-
import os
def filedir():
    print u'Current directory:', os.getcwdu()
    cwd = os.getcwdu()
    print u'This directory contains', os.listdir(cwd)
filelist = os.listdir(cwd)
L=[]
for item in filelist:
if os.path.isfile(item):
L.append(item)
    print u'Files in this directory:', L
def pythonpath():
py = u'D:\\RobotFramework\\python'
count = 0
filelist = os.listdir(py)
for dirpath,dirnames,filenames in os.walk(py):
for item in filenames:
if item.endswith('.pyc'):
count = count + 1
    print u'Number of files ending in .pyc under the Python install folder:', count
if __name__ == '__main__':
filedir()
pythonpath()
| inkfountain/learn-py-a-little | lesson_os/practiceOS_2.py | Python | gpl-2.0 | 726 |
__author__ = 'mdavid'
import os
import sys
from unittest import TestLoader, TextTestRunner
if __name__ == '__main__':
tl = TestLoader()
master_test_suite = tl.discover(
start_dir=os.getcwd(),
pattern='test_*.py',
top_level_dir=os.path.join(os.getcwd(), '..')
)
result = TextTestRunner(verbosity=2).run(master_test_suite)
if result.errors or result.failures:
sys.exit(-1)
    sys.exit(0)
| netkicorp/addressimo | test/run_tests.py | Python | bsd-3-clause | 442 |
import unittest
from rowgenerators import get_generator
from rowgenerators import parse_app_url, get_cache
class TestIssues(unittest.TestCase):
def x_test_pass_target_format(self):
us = 'file:///Users/eric/Downloads/7908485365090507159.zip#VictimRecords.txt&target_format=csv'
u = parse_app_url(us, target_format=None)
print(u)
r = u.get_resource()
print(r)
t = r.get_target()
print(t)
g = t.generator
print(len(list(g)))
if __name__ == '__main__':
unittest.main()
| CivicKnowledge/rowgenerators | rowgenerators/test/test_issues.py | Python | mit | 556 |
# -*- coding: utf-8 -*-
import pprint
from datetime import datetime
class BaseResponse(dict):
class assertRaises:
def __init__(self, expected, expected_regexp=None):
self.expected = expected
self.failureException = AssertionError
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb): # pragma: no cover
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
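    # Editor's note -- a sketch of how this nested context manager is meant
    # to be used; it mirrors unittest.TestCase.assertRaises (the names
    # 'response' and the expected exception are hypothetical):
    #
    #   with BaseResponse.assertRaises(AssertionError):
    #       response.assertHasId(42)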
def __init__(self, data):
if data is None:
self.update({})
elif data.get('data'):
self.update(data['data'])
else:
self.update(data)
if data is not None and data.get('included'):
self['included'] = data['included']
@property
def id(self):
assert 'id' in self
try:
return int(self['id'])
except AssertionError: # pragma: no cover
raise AttributeError("Object id not found in response.")
@property
def created_on_datetime(self):
return self._format_datetime(self.get_attribute('created-on-datetime'))
def _format_datetime(self, date_time):
return datetime.strptime(date_time, '%Y-%m-%dT%H:%M:%S')
def get_attribute(self, attribute):
attribute = attribute.replace('_', '-')
assert 'attributes' in self
assert attribute in self['attributes'], attribute
try:
return self['attributes'][attribute]
except AssertionError: # pragma: no cover
raise AttributeError("Attribute '%s' not found in response." % attribute)
def _get_relationships(self, relationships):
relationships = relationships.replace('_', '-')
assert 'relationships' in self
assert relationships in self['relationships'], relationships
return self['relationships'][relationships]
def assertDataIsNone(self):
"""Assert the data attribute is None
"""
assert 'data' in self
assert self['data'] is None, self['data']
def assertHasId(self, value):
"""Assert the ID has value
"""
assert str(self.id) == str(value), "Expected id to be '{}' but was '{}'".format(value, self.id)
return self
def assertHasRelationshipRelated(self, relation_type, link):
"""Assert an error response has a specific pointer
"""
relation_type = relation_type.replace('_', '-')
assert 'relationships' in self
assert relation_type in self['relationships']
assert 'links' in self['relationships'][relation_type]
assert 'related' in self['relationships'][relation_type]['links']
try:
assert link in self['relationships'][relation_type]['links']['related']
except AssertionError: # pragma: no cover
raise AttributeError(
"assert '%s' in self['relationships']['%s']['links']['related']" % (link, relation_type))
return self
def assertHasRelationshipSelf(self, relation_type, link):
"""Assert an error response has a specific pointer
"""
relation_type = relation_type.replace('_', '-')
assert 'relationships' in self
assert relation_type in self['relationships'], relation_type
assert 'links' in self['relationships'][relation_type], relation_type
assert 'self' in self['relationships'][relation_type]['links'], relation_type
assert link in self['relationships'][relation_type]['links']['self'], link
return self
def assertHasAttribute(self, attribute, value):
"""Assert a response attribute has a specific value
"""
try:
assert self.get_attribute(attribute) == value
except AssertionError: # pragma: no cover
raise AttributeError("Attribute '%s' value '%s' not the expected one (%s)." %
(attribute, self.get_attribute(attribute), value))
return self
def assertAttributeNotPresent(self, attribute):
"""Assert a response doesn't contains an attribute
"""
try:
self.get_attribute(attribute)
raise AttributeError("Attribute '{}' was not found in response but we don't expect it.".format(attribute)) # pragma: no cover
except AssertionError:
pass
return self
def assertNotHasAttribute(self, attribute, value):
"""Assert a response attribute equals a specific value
"""
try:
assert self.get_attribute(attribute) != value
except AssertionError: # pragma: no cover
raise AttributeError("Attribute '%s' value '%s' is expected to be different then '%s'." %
(attribute, self.get_attribute(attribute), value))
return self
def assertHasRelationshipData(self, relationships, value, obj_type):
"""Assert a response relation has a specific value
"""
rel = self._get_relationships(relationships)
        if value is None:  # pragma: no cover
            assert rel['data'] is None
            return self
        assert rel['data'] is not None
try:
assert 'id' in rel['data']
assert rel['data']['id'] == str(value)
except AssertionError: # pragma: no cover
raise AttributeError("Relationships '%s' value should be '%s' but was '%s'." %
(relationships, value, rel['data']['id']))
try:
assert 'type' in rel['data']
assert rel['data']['type'] == obj_type
except AssertionError: # pragma: no cover
raise AttributeError("Relationships '%s' should be '%s' but was '%s'." %
(relationships, obj_type, rel['data']['type']))
return self
def assertHasRelationshipDatas(self, relationships, values, obj_type):
"""Assert a response relation has specific values
"""
rel = self._get_relationships(relationships)
        if values is None:  # pragma: no cover
            assert rel['data'] is None, rel
            return self
        assert rel['data'] is not None, rel
str_values = [str(value.id) for value in values]
found_ids = []
try:
# returned data in expected list
for data in rel['data']:
assert 'id' in data
assert data['id'] in str_values, data['id']
assert 'type' in data
assert data['type'] == obj_type
found_ids.append(data['id'])
# expect to find all expected values in response
for value in str_values:
assert value in found_ids
except AssertionError: # pragma: no cover
raise AttributeError("Included relationships '%s' not found in response, expected %s, found %s." % (
relationships, str_values, rel['data']))
return self
def assertHasData(self, obj_type, value):
"""Assert a response has a specific data value
"""
assert 'type' in self, "'type' key not found in 'data'"
assert 'id' in self, "'id' key not found in 'data'"
assert self['type'] == obj_type, "type '{}' expected but found '{}'".format(obj_type, self['type'])
assert self['id'] == value, "id '{}' expected but found '{}'".format(value, self['id'])
return self
def assertCreationDateTime(self):
self.assertDateTimePresent('created-on-datetime')
return self
def assertUpdatedDateTime(self):
self.assertDateTimePresent('updated-on-datetime')
return self
def assertHasAttributeDateTimeOrNone(self, attribute, date_time):
if date_time is None:
return
self.assertHasAttributeDateTime(attribute, date_time)
return self
def assertHasAttributeDateTime(self, attribute, date_time):
self.assertDateTimePresent(attribute)
if isinstance(date_time, datetime):
date_time = date_time.strftime("%Y-%m-%dT%H:%M:%S")
assert self.get_attribute(attribute)[:-1] == date_time[:-1]
return self
    def assertDateTimePresent(self, attribute):
        date_time = self.get_attribute(attribute)
        self.assertIsDateTime(date_time)
return self
def assertIsDateTime(self, date_time):
if isinstance(date_time, datetime): # pragma: no cover
return self
try:
datetime.strptime(date_time, "%Y-%m-%dT%H:%M:%S")
except ValueError: # pragma: no cover
assert False, 'Date is not parsable (%s)' % date_time
return self
def assertRaiseJsonApiError(self, pointer):
"""Assert an error response has a specific pointer
"""
assert 'errors' in self
for error in self['errors']:
assert 'source' in error
assert 'pointer' in error['source']
if pointer in error['source']['pointer']:
return self
assert False, "JsonApiError pointer '{}' not raised".format(pointer) # pragma: no cover
def assertJsonApiErrorCount(self, count):
"""Assert an error response has a specific number of entries
"""
assert 'errors' in self, "No error found but we expect to see {}".format(count)
        assert len(self['errors']) == count, "Expected to find {} errors, but was {}" \
            .format(count, len(self['errors']))
return self
    def assertDateTimeAlmostEqual(self, first, second, delta=1):
        """ Compare two datetime attributes, accept maximum difference
        of `delta` seconds.
        """
        first_attribute = datetime.strptime(self.get_attribute(first), "%Y-%m-%dT%H:%M:%S")
        second_attribute = datetime.strptime(self.get_attribute(second), "%Y-%m-%dT%H:%M:%S")
        computed_delta = abs((first_attribute - second_attribute).total_seconds())
        assert computed_delta <= delta
def pprint(self): # pragma: no cover
pprint.pprint(self)
return self
| geokrety/geokrety-api | tests/unittests/utils/responses/base.py | Python | gpl-3.0 | 10,902 |
# -*- coding: utf-8 -*-
"""
script to run a local, self contained server
python 3.6 or higher required
currently is a candidate to use asyncio
"""
import logging
import time
import platform
from subprocess import Popen, call
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('tailor.launcher')
processes = (
('service_cmd', 'apps/service/__main__.py'),
('kiosk_server_cmd', 'apps/server/__main__.py'),
('kiosk_cmd', 'apps/kiosk/__main__.py'))
# TODO: find python cmd name on whatever platform
system = platform.system()
if system == 'Linux':
python_cmd = '/usr/bin/python3'
elif system == 'Windows':
python_cmd = "C:\\Users\\Leif\\AppData\\Local\\Programs\\Python\\Python36\\python.exe"
# TODO: use subprocess.run
def run_processes():
for name, cmd in processes:
logger.debug('starting process %s', name)
args = [python_cmd, cmd]
proc = Popen(args)
time.sleep(2)
yield proc
if __name__ == '__main__':
# TODO: check for running in gnome environment
# TODO: release_gvfs_from_camera fails in windows. provide better check in future
if system == 'Linux':
from tailor.platform.unix import release_gvfs_from_camera
try:
release_gvfs_from_camera()
except FileNotFoundError:
pass
running_processes = list()
try:
for proc in run_processes():
running_processes.append(proc)
running = True
while running:
for proc in running_processes:
value = proc.poll()
if value is not None:
logger.debug('one process has quit')
running = False
break
time.sleep(.1)
    except:  # catch everything (incl. KeyboardInterrupt) so cleanup runs
        import traceback
        traceback.print_exc()
logger.debug('an exception was raised and program will now terminate')
finally:
logger.debug('cleaning up...')
for proc in running_processes:
try:
start = time.time()
while proc.poll() is None:
if time.time() - start > 10:
break
try:
proc.terminate()
proc.kill()
except EnvironmentError:
pass
# TODO: find the correct exception to catch
except:
pass
# werkzerg/flask refuses to close using subprocess
# so here is my heavy handed hack until i get it
# figured out
# TODO: better process cleanup
if system == 'Linux':
try:
call(['killall', '-KILL', 'python3'], timeout=10)
except FileNotFoundError:
pass
| bitcraft/tailor | run_local.py | Python | gpl-3.0 | 2,771 |
#!/usr/bin/env python
from __future__ import with_statement
import sys
import datetime
import ConfigParser
sys.path.insert(0,"/usr/lib/dialcentral/")
import constants
import alarm_notify
def notify_on_change():
filename = "%s/notification.log" % constants._data_path_
with open(constants._notifier_logpath_, "a") as file:
file.write("Notification: %r\n" % (datetime.datetime.now(), ))
config = ConfigParser.SafeConfigParser()
config.read(constants._user_settings_)
backend = alarm_notify.create_backend(config)
notifyUser = alarm_notify.is_changed(config, backend)
if notifyUser:
file.write("\tChange occurred\n")
if __name__ == "__main__":
notify_on_change()
| epage/dialcentral-gtk | src/examples/log_notifier.py | Python | lgpl-2.1 | 690 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add standardattr to qos policies
Revision ID: 67daae611b6e
Revises: 0f5bef0f87d4
Create Date: 2016-08-18 14:10:30.021015
"""
revision = '67daae611b6e'
down_revision = '0f5bef0f87d4'
from alembic import op
import sqlalchemy as sa
TABLE = 'qos_policies'
def upgrade():
op.add_column(TABLE, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True))
| eayunstack/neutron | neutron/db/migration/alembic_migrations/versions/newton/expand/67daae611b6e_add_standard_attr_to_qos_policies.py | Python | apache-2.0 | 978 |
import decimal
from twisted.trial.unittest import SkipTest, TestCase
from jsonutil.jsonutil import decoder
from jsonutil.jsonutil import encoder
class TestSpeedups(TestCase):
def test_scanstring(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self.assertEquals(decoder.scanstring.__module__, "simplejson._speedups")
self.assert_(decoder.scanstring is decoder.c_scanstring)
def test_encode_basestring_ascii(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self.assertEquals(encoder.encode_basestring_ascii.__module__, "simplejson._speedups")
self.assert_(encoder.encode_basestring_ascii is
encoder.c_encode_basestring_ascii)
| zookos/jsonutil | jsonutil/test/json_tests/test_speedups.py | Python | gpl-2.0 | 854 |
"""Test UniFi Controller."""
from collections import deque
from datetime import timedelta
import aiounifi
from asynctest import Mock, patch
import pytest
from homeassistant import config_entries
from homeassistant.components import unifi
from homeassistant.components.unifi.const import (
CONF_CONTROLLER,
CONF_SITE_ID,
UNIFI_CONFIG,
UNIFI_WIRELESS_CLIENTS,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.exceptions import ConfigEntryNotReady
CONTROLLER_HOST = {
"hostname": "controller_host",
"ip": "1.2.3.4",
"is_wired": True,
"last_seen": 1562600145,
"mac": "10:00:00:00:00:01",
"name": "Controller host",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
CONTROLLER_DATA = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: False,
}
ENTRY_CONFIG = {CONF_CONTROLLER: CONTROLLER_DATA}
SITES = {"Site name": {"desc": "Site name", "name": "site_id", "role": "admin"}}
async def setup_unifi_integration(
hass,
config,
options,
sites,
clients_response,
devices_response,
clients_all_response,
):
"""Create the UniFi controller."""
if UNIFI_CONFIG not in hass.data:
hass.data[UNIFI_CONFIG] = []
hass.data[UNIFI_WIRELESS_CLIENTS] = unifi.UnifiWirelessClients(hass)
config_entry = config_entries.ConfigEntry(
version=1,
domain=unifi.DOMAIN,
title="Mock Title",
data=config,
source="test",
connection_class=config_entries.CONN_CLASS_LOCAL_POLL,
system_options={},
options=options,
entry_id=1,
)
mock_client_responses = deque()
mock_client_responses.append(clients_response)
mock_device_responses = deque()
mock_device_responses.append(devices_response)
mock_client_all_responses = deque()
mock_client_all_responses.append(clients_all_response)
mock_requests = []
async def mock_request(self, method, path, json=None):
mock_requests.append({"method": method, "path": path, "json": json})
if path == "s/{site}/stat/sta" and mock_client_responses:
return mock_client_responses.popleft()
if path == "s/{site}/stat/device" and mock_device_responses:
return mock_device_responses.popleft()
if path == "s/{site}/rest/user" and mock_client_all_responses:
return mock_client_all_responses.popleft()
return {}
with patch("aiounifi.Controller.login", return_value=True), patch(
"aiounifi.Controller.sites", return_value=sites
), patch("aiounifi.Controller.request", new=mock_request):
await unifi.async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
hass.config_entries._entries.append(config_entry)
controller_id = unifi.get_controller_id_from_config_entry(config_entry)
if controller_id not in hass.data[unifi.DOMAIN]:
return None
controller = hass.data[unifi.DOMAIN][controller_id]
controller.mock_client_responses = mock_client_responses
controller.mock_device_responses = mock_device_responses
controller.mock_client_all_responses = mock_client_all_responses
controller.mock_requests = mock_requests
return controller
async def test_controller_setup(hass):
"""Successful setup."""
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
entry = controller.config_entry
assert len(forward_entry_setup.mock_calls) == len(
unifi.controller.SUPPORTED_PLATFORMS
)
assert forward_entry_setup.mock_calls[0][1] == (entry, "device_tracker")
assert forward_entry_setup.mock_calls[1][1] == (entry, "sensor")
assert forward_entry_setup.mock_calls[2][1] == (entry, "switch")
assert controller.host == CONTROLLER_DATA[CONF_HOST]
assert controller.site == CONTROLLER_DATA[CONF_SITE_ID]
assert controller.site_name in SITES
assert controller.site_role == SITES[controller.site_name]["role"]
assert (
controller.option_allow_bandwidth_sensors
== unifi.const.DEFAULT_ALLOW_BANDWIDTH_SENSORS
)
assert controller.option_block_clients == unifi.const.DEFAULT_BLOCK_CLIENTS
assert controller.option_track_clients == unifi.const.DEFAULT_TRACK_CLIENTS
assert controller.option_track_devices == unifi.const.DEFAULT_TRACK_DEVICES
assert (
controller.option_track_wired_clients == unifi.const.DEFAULT_TRACK_WIRED_CLIENTS
)
assert controller.option_detection_time == timedelta(
seconds=unifi.const.DEFAULT_DETECTION_TIME
)
assert controller.option_ssid_filter == unifi.const.DEFAULT_SSID_FILTER
assert controller.mac is None
assert controller.signal_update == "unifi-update-1.2.3.4-site_id"
assert controller.signal_options_update == "unifi-options-1.2.3.4-site_id"
async def test_controller_mac(hass):
"""Test that it is possible to identify controller mac."""
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[CONTROLLER_HOST],
devices_response=[],
clients_all_response=[],
)
assert controller.mac == "10:00:00:00:00:01"
async def test_controller_import_config(hass):
"""Test that import configuration.yaml instructions work."""
hass.data[UNIFI_CONFIG] = [
{
CONF_HOST: "1.2.3.4",
CONF_SITE_ID: "Site name",
unifi.const.CONF_ALLOW_BANDWIDTH_SENSORS: True,
unifi.CONF_BLOCK_CLIENT: ["random mac"],
unifi.CONF_DONT_TRACK_CLIENTS: True,
unifi.CONF_DONT_TRACK_DEVICES: True,
unifi.CONF_DONT_TRACK_WIRED_CLIENTS: True,
unifi.CONF_DETECTION_TIME: 150,
unifi.CONF_SSID_FILTER: ["SSID"],
}
]
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
assert controller.option_allow_bandwidth_sensors is False
assert controller.option_block_clients == ["random mac"]
assert controller.option_track_clients is False
assert controller.option_track_devices is False
assert controller.option_track_wired_clients is False
assert controller.option_detection_time == timedelta(seconds=150)
assert controller.option_ssid_filter == ["SSID"]
async def test_controller_not_accessible(hass):
"""Retry to login gets scheduled when connection fails."""
with patch.object(
unifi.controller, "get_controller", side_effect=unifi.errors.CannotConnect
), pytest.raises(ConfigEntryNotReady):
await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
async def test_controller_unknown_error(hass):
"""Unknown errors are handled."""
with patch.object(unifi.controller, "get_controller", side_effect=Exception):
await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
assert hass.data[unifi.DOMAIN] == {}
async def test_reset_after_successful_setup(hass):
"""Calling reset when the entry has been setup."""
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
assert len(controller.listeners) == 5
result = await controller.async_reset()
await hass.async_block_till_done()
assert result is True
assert len(controller.listeners) == 0
async def test_failed_update_failed_login(hass):
"""Running update can handle a failed login."""
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
with patch.object(
controller.api.clients, "update", side_effect=aiounifi.LoginRequired
), patch.object(controller.api, "login", side_effect=aiounifi.AiounifiException):
await controller.async_update()
await hass.async_block_till_done()
assert controller.available is False
async def test_failed_update_successful_login(hass):
"""Running update can login when requested."""
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
with patch.object(
controller.api.clients, "update", side_effect=aiounifi.LoginRequired
), patch.object(controller.api, "login", return_value=Mock(True)):
await controller.async_update()
await hass.async_block_till_done()
assert controller.available is True
async def test_failed_update(hass):
"""Running update can login when requested."""
controller = await setup_unifi_integration(
hass,
ENTRY_CONFIG,
options={},
sites=SITES,
clients_response=[],
devices_response=[],
clients_all_response=[],
)
with patch.object(
controller.api.clients, "update", side_effect=aiounifi.AiounifiException
):
await controller.async_update()
await hass.async_block_till_done()
assert controller.available is False
await controller.async_update()
await hass.async_block_till_done()
assert controller.available is True
async def test_get_controller(hass):
"""Successful call."""
with patch("aiounifi.Controller.login", return_value=Mock()):
assert await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_verify_ssl_false(hass):
"""Successful call with verify ssl set to false."""
controller_data = dict(CONTROLLER_DATA)
controller_data[CONF_VERIFY_SSL] = False
with patch("aiounifi.Controller.login", return_value=Mock()):
assert await unifi.controller.get_controller(hass, **controller_data)
async def test_get_controller_login_failed(hass):
"""Check that get_controller can handle a failed login."""
result = None
with patch("aiounifi.Controller.login", side_effect=aiounifi.Unauthorized):
try:
result = await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
except unifi.errors.AuthenticationRequired:
pass
assert result is None
async def test_get_controller_controller_unavailable(hass):
"""Check that get_controller can handle controller being unavailable."""
result = None
with patch("aiounifi.Controller.login", side_effect=aiounifi.RequestError):
try:
result = await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
except unifi.errors.CannotConnect:
pass
assert result is None
async def test_get_controller_unknown_error(hass):
"""Check that get_controller can handle unkown errors."""
result = None
with patch("aiounifi.Controller.login", side_effect=aiounifi.AiounifiException):
try:
result = await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
except unifi.errors.AuthenticationRequired:
pass
assert result is None
| leppa/home-assistant | tests/components/unifi/test_controller.py | Python | apache-2.0 | 12,269 |
import astropy, astropy.io.fits as pyfits, numpy, scipy, sys, re,pylab
def mypoly(a, x):
    # n: number of terms in the fit
    # a: list of polynomial coefficients (originally "float *a" in C)
    # x: value to evaluate the polynomial at
y = [1.]
t = [x]
print 'printing x'
print x
z=y[0]*t[0]
#NORMPOINT = 10000
for i in range(1,len(a)+1):
t.append(t[i-1]*x)
y.append(a[i-1])
print 'i='+str(i) + ' a = '+str(a[i-1])
for i in range(1,len(a)+1):
print t[i]
print y[i]
z+=t[i]*y[i]
# for (i=0; i<n; i++) {
# y += a[i]*t
# t *= x
#print y
#
# y *= x
#NORMPOINT = 10000
#scale = NORMPOINT/mypoly(a, NORMPOINT)
return z
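# Editor's note -- a worked example (values chosen for illustration only):
# mypoly builds t = [x, x**2, ..., x**(n+1)] and y = [1., a[0], ..., a[n-1]],
# so z = x + a[0]*x**2 + ... + a[n-1]*x**(n+1). For instance:
#
#   mypoly([2., 3.], 1.0)  # -> 1 + 2*1**2 + 3*1**3 = 6.0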
def mypoly_mult(a, x):
    # n: number of terms in the fit
    # a: list of polynomial coefficients (originally "float *a" in C)
    # x: value to evaluate the polynomial at
y = [1.]
t = [1.]
print 'printing x'
print x
#z=y[0]*t[0]
z=0
#NORMPOINT = 10000
for i in range(1,len(a)+1):
t.append(t[i-1]*x)
y.append(a[i-1])
print 'i='+str(i) + ' a = '+str(a[i-1])
for i in range(1,len(a)+1):
print t[i]
print y[i]
z+=t[i]*y[i]
# for (i=0; i<n; i++) {
# y += a[i]*t
# t *= x
#print y
#
# y *= x
#NORMPOINT = 10000
#scale = NORMPOINT/mypoly(a, NORMPOINT)
return z
def mypoly_char(txt,x):
# here txt must be the early chip name.
params={}
params['w9c2']=[-6.85636e-05,
7.34159e-09,
-3.49597e-13,
6.25578e-18]
params['w4c5']=[-0.000229013,
4.99811e-08,
-5.86075e-12,
3.40795e-16,
-5.24326e-21,
-3.25813e-25,
1.12422e-29]
params['w6c1']= [-7.02288e-05,
7.6895e-09,
-3.75244e-13,
6.88234e-18]
params['w7c3']= [-0.000218723,
4.55178e-08,
-5.06917e-12,
2.77035e-16,
-3.58369e-21,
-2.74045e-25,
8.65118e-30]
if txt in ['w9c2', 'w4c5','w6c1','w7c3']:
return mypoly(params[txt],x)
else:
print 'no correction for :', txt
return x
def mypoly_new_char(txt,x):
# here txt must be the early chip name.
params={}
params['w9c2']=[ 1.17970e+00,
-3.76728e-05,
1.53093e-09,
1.56436e-13,
-1.63457e-17,
5.45417e-22,
-6.33405e-27]
params['w4c5']=[ -0.000229013,
4.99811e-08,
-5.86075e-12,
3.40795e-16,
-5.24326e-21,
-3.25813e-25,
1.12422e-29]
params['w6c1']= [9.64368e-01,
5.96304e-05,
-1.55794e-08,
1.65423e-12,
-8.58425e-17,
2.17863e-21,
-2.16169e-26]
params['w7c3']= [-0.000218723,
4.55178e-08,
-5.06917e-12,
2.77035e-16,
-3.58369e-21,
-2.74045e-25,
8.65118e-30]
params['si005s']=[9.45486e-01,
3.06580e-05,
-7.32848e-09,
9.19088e-13,
-5.99454e-17,
1.95305e-21,
-2.51130e-26]
params['si001s']=[8.89229e-01,
8.05488e-05,
-2.45696e-08,
3.87417e-12,
-3.24538e-16,
1.37049e-20,
-2.28946e-25]
params['si006s']=[4.27821e-01,
3.67858e-04,
-9.41304e-08,
1.21898e-11,
-8.40626e-16,
2.93915e-20,
-4.09096e-25]
params['si002s']=[7.13973e-01,
2.04011e-04,
-5.89538e-08,
8.69446e-12,
-6.83755e-16,
2.73065e-20,
-4.34803e-25]
if txt in ['w9c2', 'w4c5','w6c1','w7c3','si001s','si002s','si005s','si006s']:
tot = 0
y=[x]
for i in range(1,len(params[txt])):
y.append(x*y[i-1])
for i in range(len(params[txt])):
tot+= params[txt][i]*y[i]
return tot
#return mypoly(params[txt],x)
else:
print 'no correction for :', txt
return x
| deapplegate/wtgpipeline | subarucorr.py | Python | mit | 4,836 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import xmpp, atexit, json, codecs, re
import os.path
import schedule
class Messenger:
    """ Base messaging class. """
    def __init__(self, login, password, onmessage=None):
        """ Constructor. """
        self.login = login
        self.password = password
        self.onmessage = onmessage
        self
    def connect(self):
        """ Connects. """
        self
    def message(self, msg, from_id):
        """ Receives a message. """
        self.onmessage(msg, from_id)
    def say(self, msg, to_id):
        """ Sends a message to a list of recipients. """
        self
    def run_once(self):
        """ One step of the event loop. """
        self
    def disconnect(self):
        """ Disconnects. """
        self
class JabberMessenger(Messenger):
    """ Messaging over Jabber (XMPP). """
    def connect(self):
        """ Connects. """
        jid = xmpp.protocol.JID(self.login)
        self.jabber = jabber = xmpp.Client(jid.getDomain(), debug=[])
        jabber.connect(secure=0)
        jabber.auth(jid.getNode(), str(self.password), resource='xmpppy')
        # register the handlers
        def onmessage(jabber, msg):
            #print 'message_callback', msg.getType(), msg.getID(), msg.getFrom(), msg.getThread(), msg.getSubject(), msg.getBody(), repr(msg.getProperties())
            s = msg.getBody()
            if not s:
                return
            jid = msg.getFrom()
            from_id = "%s@%s" % (jid.getNode(), jid.getDomain())
            self.message(s, from_id)
            return
        jabber.RegisterHandler('message', onmessage)
        jabber.sendInitPresence()
        #print jabber.connected
        #assert jabber.connected, "connected"
    def say(self, msg, to):
        """ Sends a message to a list of recipients. """
        msg += u""
        for jid in to:
            self.jabber.send(xmpp.protocol.Message(jid, msg))
    def run_once(self):
        """ One step of the event loop. """
        self.jabber.Process(1)
    def disconnect(self):
        """ Disconnects. """
        self.jabber.disconnect()
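# Editor's note -- a minimal usage sketch (hypothetical credentials), echoing
# every incoming message back to its sender via the callback hook:
#
#   m = JabberMessenger('bot@example.org', 'secret')
#   m.onmessage = lambda msg, from_id: m.say(u"you said: " + msg, [from_id])
#   m.connect()
#   while 1:
#       m.run_once()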
class Mishel:
    """ Mishel, the task bot. """
    def __init__(self, messenger, id_masters, save_path):
        ''' Constructor. '''
        self.messenger = messenger
        messenger.onmessage = lambda msg, to_id: self.message(msg, to_id)
        assert isinstance(id_masters, list), u"a list of masters is required"
        assert len(id_masters)>0, u"the list of masters must not be empty"
        self.id_masters = id_masters
        assert save_path, u"no path to the json ini-file given"
        self.save_path = save_path
        self.load()
    def load(self):
        """ Loads the data. """
        save_path = self.save_path
        if os.path.isfile(save_path) and os.path.getsize(save_path) != 0:
            with codecs.open(save_path, "rb", encoding='utf8') as f:
                self.ion = json.load(f)
        else:
            self.ion = {
                'tz': [], # tasks
                'idx': 0, # index of the current task
            }
    def save(self):
        ''' Runs on shutdown and saves the data. '''
        with codecs.open(self.save_path, "wb", encoding='utf8') as f:
            json.dump(self.ion, f)
    def message(self, msg, from_id):
        ''' A message has arrived. '''
        msg += u""
        if from_id not in self.id_masters:
            self.say(u"%s wrote: %s" % (from_id, msg))
            return self
        #jabber.send(xmpp.protocol.Message(from_id, u"wow! hi there! you said: %s" % s))
tail = [""]
def test(m):
m = re.sub(ur'\s+', ur'\s+', m)
#print m, type(m)
g = re.match(ur"\s*%s[ \t]*" % m, msg)
#print g
if not g:
return False
tail[0] = msg[g.end():]
return True
if test(r"список команд|\?"):
self.say(u"1. список команд или ?\n"+
u"2. добавь: <задание>\n"+
u"3. замени <номер задания>: <задание>\n"+
u"4. удали <номер задания>\n"+
u"5. покажи задания\n"
)
elif test("добавь:"):
tz = self.ion["tz"]
if None in tz:
idx = tz.index(None)
tz[idx] = tail[0]
else:
idx = len(tz)
tz.append(tail[0])
self.say("добавила. № %i" % (idx+1))
self.save()
elif test("замени"):
try:
idx, x = tail[0].split(":")
tz[int(idx)] = x.lstrip()
except:
self.say("не могу :(")
return
self.say("заменила")
self.save()
elif test("удали"):
tz = self.ion["tz"]
try:
idx = abs(int(tail[0]))
idx -= 1
val = tz[idx]
except:
val = None
if val is None:
self.say("а номер задания какой?")
return self
if idx+1 == len(tz):
tz.pop()
while len(tz) and tz[-1] is None:
tz.pop()
else:
tz[idx] = None
self.say("удалила")
self.save()
elif test("покажи задания"):
# берём первую строку каждого задания
tz = self.ion["tz"]
if len(tz) == 0:
self.say("заданий пока нет")
return self
ls = []
for i, x in enumerate(tz):
if not x is None:
first_line = re.match(ur".*", x).group(0)
ls.append(u"%i. %s" % (i+1, first_line))
self.say("\n".join(ls))
else:
self.say("не понимаю")
    def say(self, msg, to=None):
        """ Sends a message. """
        if to is None:
            to = self.id_masters
        self.messenger.say(msg, to)
    def task(self):
        """ Announces the current task. """
        tz = self.ion["tz"]
        if len(tz)==0:
            return self
        idx = self.ion["idx"]
        if idx >= len(tz):
            self.ion["idx"] = idx = 0
        else:
            self.ion["idx"]+=1
        s = tz[idx]
        if s is None:
            # slot was freed by "delete"; skip it until the next tick
            return self
        first_line = re.match(ur".*", s).group(0)
        self.say("switch to task No. %i: %s\n" % (idx+1, first_line))
if __name__ == '__main__':
save_path = 'mishel.json'
xmpp_jid = '[email protected]'
xmpp_pwd = 'detiOkeana12'
xmpp_to = ['[email protected]']
    #msg = 'Hello!'
messenger = JabberMessenger(xmpp_jid, xmpp_pwd)
mishel = Mishel(messenger, xmpp_to, save_path)
messenger.connect()
@atexit.register
def destroy():
        ''' Runs on program exit. '''
#mishel.save()
print "disconnect"
messenger.disconnect()
for i in xrange(0, 45, 5):
schedule.every().hour.at("00:%i" % i).do(lambda: mishel.task())
mishel.task()
# https://github.com/dbader/schedule
# schedule.every(10).minutes.do(job)
# schedule.every().hour.do(job)
# schedule.every().day.at("10:30").do(job)
print "start"
    # infinite loop
while 1:
schedule.run_pending()
messenger.run_once()
| darviarush/rubin-forms | ex/mishel/mishel.py | Python | bsd-2-clause | 6,721 |
comb= combs.newLoadCombination("ELU001","1.00*G")
comb= combs.newLoadCombination("ELU002","1.35*G")
comb= combs.newLoadCombination("ELU003","1.00*G + 1.50*SC")
comb= combs.newLoadCombination("ELU004","1.00*G + 1.50*SC + 0.90*NV")
comb= combs.newLoadCombination("ELU005","1.00*G + 1.50*SC + 0.90*VT")
comb= combs.newLoadCombination("ELU006","1.00*G + 1.50*SC + 0.90*VT + 0.90*NV")
comb= combs.newLoadCombination("ELU007","1.00*G + 1.50*VT")
comb= combs.newLoadCombination("ELU008","1.00*G + 1.50*VT + 0.90*NV")
comb= combs.newLoadCombination("ELU009","1.00*G + 1.05*SC + 1.50*VT")
comb= combs.newLoadCombination("ELU010","1.00*G + 1.05*SC + 1.50*VT + 0.90*NV")
comb= combs.newLoadCombination("ELU011","1.00*G + 1.50*NV")
comb= combs.newLoadCombination("ELU012","1.00*G + 0.90*VT + 1.50*NV")
comb= combs.newLoadCombination("ELU013","1.00*G + 1.05*SC + 1.50*NV")
comb= combs.newLoadCombination("ELU014","1.00*G + 1.05*SC + 0.90*VT + 1.50*NV")
comb= combs.newLoadCombination("ELU015","1.35*G + 1.50*SC")
comb= combs.newLoadCombination("ELU016","1.35*G + 1.50*SC + 0.90*NV")
comb= combs.newLoadCombination("ELU017","1.35*G + 1.50*SC + 0.90*VT")
comb= combs.newLoadCombination("ELU018","1.35*G + 1.50*SC + 0.90*VT + 0.90*NV")
comb= combs.newLoadCombination("ELU019","1.35*G + 1.50*VT")
comb= combs.newLoadCombination("ELU020","1.35*G + 1.50*VT + 0.90*NV")
comb= combs.newLoadCombination("ELU021","1.35*G + 1.05*SC + 1.50*VT")
comb= combs.newLoadCombination("ELU022","1.35*G + 1.05*SC + 1.50*VT + 0.90*NV")
comb= combs.newLoadCombination("ELU023","1.35*G + 1.50*NV")
comb= combs.newLoadCombination("ELU024","1.35*G + 0.90*VT + 1.50*NV")
comb= combs.newLoadCombination("ELU025","1.35*G + 1.05*SC + 1.50*NV")
comb= combs.newLoadCombination("ELU026","1.35*G + 1.05*SC + 0.90*VT + 1.50*NV")
| lcpt/xc | verif/tests/aux/def_hip_elu.py | Python | gpl-3.0 | 1,780 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the Coordinate search."""
import os
from string import Template
import sys
from search.common import exceptions
from search.common import geconstants
from search.common import utils
from search.plugin import coordinate_transform
class CoordinateSearch(object):
"""Class for performing the Coordinate search.
Coordinate search supports the following formats:
1. Decimal Degrees (DD)
2. Degrees Minutes Seconds (DMS)
3. Degrees Decimal Minutes (DDM)
4. Military Grid Reference System (MGRS)
5. Universal Transverse Mercator (UTM)
Coordinate search transforms coordinates from DMS, DDM, UTM, MGRS formats to
DD, validates the coordinates and sends the response back to the client.
Depending on the client type, KML or JSONP formats are supported.
"""
NUM_OF_COORDS_IN_LAT_LNG_FORMAT = 2
NUM_OF_COORDS_IN_MGRS_FORMAT = 1
def __init__(self):
"""Inits CoordinateSearch.
Initializes the logger "ge_search".
Initializes templates for kml, placemark templates for KML/JSONP outputs.
"""
self.utils = utils.SearchUtils()
self._transform = coordinate_transform.CoordinateTransform()
configs = self.utils.GetConfigs(
os.path.join(geconstants.SEARCH_CONFIGS_DIR, "CoordinateSearch.conf"))
self._jsonp_call = self.utils.jsonp_functioncall
self._geom = """
<name>%s</name>
<styleUrl>%s</styleUrl>
<Point>
<coordinates>%s,%s</coordinates>
</Point>\
"""
self._json_geom = """
{
"Point": {
"coordinates": "%s,%s"
}
}
"""
self._kml = """
<kml xmlns="http://www.opengis.net/kml/2.2"
xmlns:gx="http://www.google.com/kml/ext/2.2"
xmlns:kml="http://www.opengis.net/kml/2.2"
xmlns:atom="http://www.w3.org/2005/Atom">
<Folder>
<name>Coordinate Search Results</name>
<open>1</open>
<Style id="placemark_label">\
${style}
</Style>\
${placemark}
</Folder>
</kml>
"""
self._kml_template = Template(self._kml)
self._placemark_template = self.utils.placemark_template
self._json_template = self.utils.json_template
self._json_placemark_template = self.utils.json_placemark_template
style_template = self.utils.style_template
self.coordinates_in_lat_lng_format_ = ["DD", "DMS", "DDM"]
self.logger = self.utils.logger
self._style = style_template.substitute(
balloonBgColor=configs.get("balloonstyle.bgcolor"),
balloonTextColor=configs.get("balloonstyle.textcolor"),
balloonText=configs.get("balloonstyle.text"),
iconStyleScale=configs.get("iconstyle.scale"),
iconStyleHref=configs.get("iconstyle.href"),
lineStyleColor=configs.get("linestyle.color"),
lineStyleWidth=configs.get("linestyle.width"),
polyStyleColor=configs.get("polystyle.color"),
polyStyleColorMode=configs.get("polystyle.colormode"),
polyStyleFill=configs.get("polystyle.fill"),
polyStyleOutline=configs.get("polystyle.outline"),
listStyleHref=configs.get("iconstyle.href"))
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the coordinate search.
Args:
environ: A list of environment variables as supplied by the
WSGI interface to the coordinate search application interface.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
search_results = ""
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
response_type = self.utils.GetResponseType(environ)
# Retrieve the function call back name for JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
original_query = self.utils.GetValue(parameters, "q")
if not original_query:
msg = "Empty search query received."
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
search_status, search_results = self.DoSearch(original_query, response_type)
if not search_status:
folder_name = "Search returned no results."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
def DoSearch(self, search_query, response_type):
"""Performs the coordinate search.
Args:
search_query: A string containing the search coordinates as
entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
coordinate_type = ""
search_results = ""
input_coordinates = []
decimal_degrees_coordinates = []
search_tokens = self.utils.SearchTokensFromString(search_query)
self.logger.debug("coordinates: %s", ",".join(search_tokens))
input_coordinates = self._transform.GetInputCoordinates(
",".join(search_tokens))
number_of_coordinates = len(input_coordinates)
if number_of_coordinates == 0:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
coordinate_type = self._transform.GetInputType(input_coordinates)
self.logger.debug("Coordinate type is %s.", coordinate_type)
if coordinate_type in self.coordinates_in_lat_lng_format_:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_LAT_LNG_FORMAT
else:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_MGRS_FORMAT
if number_of_coordinates > reqd_num_of_coordinates:
self.logger.warning(
"extra search parameters ignored: %s", ",".join(
input_coordinates[reqd_num_of_coordinates:]))
input_coordinates = input_coordinates[:reqd_num_of_coordinates]
elif number_of_coordinates < reqd_num_of_coordinates:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
decimal_degrees_coordinates = self._transform.TransformToDecimalDegrees(
coordinate_type, input_coordinates)
search_results = self.ConstructResponse(
response_type, decimal_degrees_coordinates)
search_status = True if search_results else False
return search_status, search_results
def ConstructKMLResponse(self, latitude, longitude):
"""Prepares KML response.
KML response has the below format:
<kml>
<Folder>
<name/>
<StyleURL>
---
</StyleURL>
<Point>
<coordinates/>
</Point>
</Folder>
</kml>
Args:
      latitude: latitude in Decimal Degrees format.
      longitude: longitude in Decimal Degrees format.
Returns:
kml_response: KML formatted response.
"""
placemark = ""
kml_response = ""
name = "%s, %s" % (latitude, longitude)
style_url = "#placemark_label"
geom = self._geom % (name, style_url, str(longitude), str(latitude))
placemark = self._placemark_template.substitute(geom=geom)
kml_response = self._kml_template.substitute(
style=self._style, placemark=placemark)
self.logger.info("KML response successfully formatted")
return kml_response
def ConstructJSONPResponse(self, latitude, longitude):
"""Prepares JSONP response.
{
"Folder": {
"name": "X,Y",
"Style": {
"IconStyle": {"scale": "1" },
"LineStyle": {
"color": "7fffff00",
"width": "5" },
"PolyStyle": {
"color": "7f66ffff",
"fill": "1",
"outline": "1" } },
"Placemark": {
"Point": {
"coordinates": "X,Y" } }
}
}
Args:
      latitude: latitude in Decimal Degrees format.
      longitude: longitude in Decimal Degrees format.
Returns:
jsonp_response: JSONP formatted response.
"""
placemark = ""
json_response = ""
jsonp_response = ""
folder_name = "%s, %s" % (latitude, longitude)
json_geom = self._json_geom % (latitude, longitude)
placemark = self._json_placemark_template.substitute(
geom=json_geom)
json_response = self._json_template.substitute(
foldername=folder_name, json_placemark=placemark)
# Escape single quotes from json_response.
json_response = json_response.replace("'", "\\'")
jsonp_response = self._jsonp_call % (self.f_callback, json_response)
self.logger.info("JSONP response successfully formatted")
return jsonp_response
def ConstructResponse(self, response_type, decimal_degrees_coordinates):
"""Construct the response based on response_type.
Args:
response_type: Response type can be KML or JSONP, depending on the client.
decimal_degrees_coordinates: List of coordinates in DD(Decimal Degrees)
format.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
"""
search_results = ""
assert response_type in self.utils.output_formats, (
self.logger.error("Invalid response type %s", response_type))
if response_type == "KML":
search_results = self.ConstructKMLResponse(
decimal_degrees_coordinates[0], decimal_degrees_coordinates[1])
elif response_type == "JSONP":
search_results = self.ConstructJSONPResponse(
decimal_degrees_coordinates[0], decimal_degrees_coordinates[1])
return search_results
def main(coords, response_type):
gepobj = CoordinateSearch()
gepobj.DoSearch(coords, response_type)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
| tst-mswartz/earthenterprise | earth_enterprise/src/server/wsgi/search/plugin/coordinate_search_handler.py | Python | apache-2.0 | 10,743 |
# -*- coding: utf-8 -*-
# list of priorities that requirements can have
PRIORITY_LIST = ['O', 'F', 'D']
# list of types that requirements can have
TYPE_LIST = ['F', 'P', 'Q', 'D']
| diegoberaldin/PyRequirementManager | src/model/constants.py | Python | gpl-3.0 | 181 |
# -*- coding: utf-8 -*-
#
# Copyright 2016, 2017 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiobotocore
import json
import logging
import os
import os.path
import yaml
from botocore.exceptions import ClientError
from livebridge.config import AWS
from livebridge.controldata.base import BaseControl
logger = logging.getLogger(__name__)
class ControlFile(BaseControl):
def __init__(self):
self._sqs_client = None
self._s3_client = None
self.config = AWS
self._updated_local = None
self.auto_update = True
async def close(self):
if self._sqs_client:
await self._sqs_client.close()
if self._s3_client:
await self._s3_client.close()
@property
async def sqs_client(self):
if self._sqs_client:
return self._sqs_client
session = aiobotocore.get_session()
self._sqs_client = session.create_client(
'sqs',
region_name=self.config["region"],
aws_secret_access_key=self.config["secret_key"] or None,
aws_access_key_id=self.config["access_key"] or None)
await self._purge_sqs_queue()
return self._sqs_client
@property
def s3_client(self):
if self._s3_client:
return self._s3_client
session = aiobotocore.get_session()
self._s3_client = session.create_client(
's3',
region_name=self.config["region"],
aws_secret_access_key=self.config["secret_key"] or None,
aws_access_key_id=self.config["access_key"] or None)
return self._s3_client
async def _purge_sqs_queue(self):
# purge queue before starting watching
try:
await self._sqs_client.purge_queue(
QueueUrl=self.config["sqs_s3_queue"]
)
logger.info("Purged SQS queue {}".format(self.config["sqs_s3_queue"]))
except ClientError as exc:
logger.warning("Purging SQS queue failed with: {}".format(exc))
async def check_control_change(self, control_path=None):
if control_path and not control_path.startswith("s3://"):
return await self._check_local_changes(control_path)
elif self.config.get("sqs_s3_queue", False):
return await self._check_s3_changes()
async def _check_local_changes(self, control_path):
is_changed = False
try:
last_updated = os.stat(control_path).st_mtime
if last_updated != self._updated_local:
is_changed = True
self._updated_local = last_updated
except Exception as exc:
logger.error("Error fetching last updated time of local control file: {}".format(exc))
return is_changed
async def _check_s3_changes(self):
client = await self.sqs_client
# check for update events
try:
response = await client.receive_message(
QueueUrl=self.config["sqs_s3_queue"]
)
for msg in response.get("Messages", []):
logger.debug("SQS {}".format(msg.get("MessageId")))
body = msg.get("Body")
data = json.loads(body) if body else None
await client.delete_message(
QueueUrl=self.config["sqs_s3_queue"],
ReceiptHandle=msg.get("ReceiptHandle")
)
if data:
for rec in data.get("Records", []):
logger.debug("EVENT: {} {}".format(
rec.get("s3", {}).get("object", {}).get("key"), rec.get("eventName")))
return True
except Exception as exc:
logger.error("Error fetching SQS messages with: {}".format(exc))
return False
async def load(self, path, *, resolve_auth=False):
if not path.startswith("s3://"):
self.auto_update = False
body = self._load_from_file(path)
else:
body = await self._load_from_s3(path)
        # safe_load avoids constructing arbitrary Python objects from the YAML.
        return yaml.safe_load(body)
def _load_from_file(self, path):
logger.info("Loading control file from disk: {}".format(path))
if not os.path.exists(path):
raise IOError("Path for control file not found.")
file = open(path, "r")
body = file.read()
self._updated_local = os.stat(path).st_mtime
file.close()
return body
async def _save_to_file(self, path, data):
logger.info("Saving control file to disk.")
directory = os.path.dirname(os.path.abspath(path))
if not os.access(directory, os.W_OK):
raise IOError("Path for control file not writable: {}".format(path))
file = open(path, "w+")
file.write(data)
file.close()
return True
async def _load_from_s3(self, url):
bucket, key = url.split('/', 2)[-1].split('/', 1)
logger.info("Loading control file from s3: {} - {}".format(bucket, key))
control_file = await self.s3_client.get_object(Bucket=bucket, Key=key)
control_data = await control_file["Body"].read()
return control_data
async def _save_to_s3(self, path, data):
bucket, key = path.split('/', 2)[-1].split('/', 1)
logger.info("Saving control file to s3: {} - {}".format(bucket, key))
await self.s3_client.put_object(Body=data, Bucket=bucket, Key=key)
return True
async def save(self, path, data):
res = False
yaml_data = yaml.dump(data, indent=4, default_flow_style=False)
if not path.startswith("s3://"):
res = await self._save_to_file(path, yaml_data)
else:
res = await self._save_to_s3(path, yaml_data)
return res
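# Usage sketch (paths and config are hypothetical; load()/save() are
# coroutines, so a running event loop is required, and the s3:// variant
# additionally needs valid credentials in livebridge.config.AWS):
#
#   import asyncio
#
#   async def demo():
#       control = ControlFile()
#       data = await control.load("/etc/livebridge/control.yaml")
#       await control.save("s3://my-bucket/control.yaml", data)
#       await control.close()
#
#   asyncio.get_event_loop().run_until_complete(demo())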
| dpa-newslab/livebridge | livebridge/controldata/controlfile.py | Python | apache-2.0 | 6,321 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-07 00:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0009_database'),
]
operations = [
migrations.AlterField(
model_name='site',
name='category',
field=models.CharField(choices=[('static', 'Static'), ('php', 'PHP'), ('dynamic', 'Dynamic'), ('vm', 'Virtual Machine')], max_length=16),
),
]
| tjcsl/director | web3/apps/sites/migrations/0010_auto_20161207_0014.py | Python | mit | 538 |
# Copyright 2015 Docker, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Taken from Docker Compose:
# https://github.com/docker/compose/blob/master/compose/config/interpolation.py
import string
class InvalidInterpolation(Exception):
def __init__(self, string):
self.string = string
class Interpolator(object):
"""
Configuration options may contain environment variables. For example,
suppose the shell contains `ETCD_VERSION=1.0` and the following
gilt.yml is supplied.
.. code-block:: yaml
- git: https://github.com/retr0h/ansible-etcd.git
version: ${ETCD_VERSION}
dst: roles/retr0h.ansible-etcd-${ETCD_VERSION}/
will substitute `${ETCD_VERSION}` with the value of the
`ETCD_VERSION` environment variable.
.. warning::
If an environment variable is not set, gilt substitutes with an
empty string.
Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Extended
shell-style features, such as `${VARIABLE-default}` and
`${VARIABLE:-default}` are also supported.
If a literal dollar sign is needed in a configuration, use a double dollar
sign (`$$`).
"""
def __init__(self, templater, mapping):
self.templater = templater
self.mapping = mapping
def interpolate(self, string):
try:
return self.templater(string).substitute(self.mapping)
except ValueError:
raise InvalidInterpolation(string)
class TemplateWithDefaults(string.Template):
idpattern = r"[_a-z][_a-z0-9]*(?::?-[^}]+)?"
# Modified from python2.7/string.py
def substitute(self, mapping):
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group("named") or mo.group("braced")
if named is not None:
if ":-" in named:
var, _, default = named.partition(":-")
return mapping.get(var) or default
if "-" in named:
var, _, default = named.partition("-")
return mapping.get(var, default)
val = mapping.get(named, "")
return "%s" % (val,)
if mo.group("escaped") is not None:
return self.delimiter
if mo.group("invalid") is not None:
self._invalid(mo)
return self.pattern.sub(convert, self.template)
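if __name__ == "__main__":
    # Minimal usage sketch (mapping values are hypothetical): substitute
    # shell-style variables from an explicit mapping rather than os.environ.
    interpolator = Interpolator(TemplateWithDefaults, {"ETCD_VERSION": "1.0"})
    print(interpolator.interpolate("roles/retr0h.ansible-etcd-${ETCD_VERSION}/"))
    # -> roles/retr0h.ansible-etcd-1.0/
    print(interpolator.interpolate("${MISSING:-fallback}"))
    # -> fallback (unset variables fall back to the ':-' default)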
| metacloud/gilt | gilt/interpolation.py | Python | mit | 2,988 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import List
from ddd.logic.encodage_des_notes.encodage.builder.gestionnaire_parcours_builder import GestionnaireParcoursBuilder
from ddd.logic.encodage_des_notes.encodage.commands import EncoderNotesCommand
from ddd.logic.encodage_des_notes.encodage.domain.model.note_etudiant import IdentiteNoteEtudiant
from ddd.logic.encodage_des_notes.encodage.domain.service.cohorte_non_complete import CohorteAvecEncodageIncomplet
from ddd.logic.encodage_des_notes.encodage.domain.service.encoder_notes_en_lot import EncoderNotesEnLot
from ddd.logic.encodage_des_notes.encodage.domain.service.i_cohortes_du_gestionnaire import ICohortesDuGestionnaire
from ddd.logic.encodage_des_notes.encodage.domain.service.i_historiser_notes import IHistoriserEncodageNotesService
from ddd.logic.encodage_des_notes.encodage.domain.service.i_notifier_encodage_notes import INotifierEncodageNotes
from ddd.logic.encodage_des_notes.encodage.repository.note_etudiant import INoteEtudiantRepository
from ddd.logic.encodage_des_notes.shared_kernel.domain.builder.encoder_notes_rapport_builder import \
EncoderNotesRapportBuilder
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_attribution_enseignant import \
IAttributionEnseignantTranslator
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_inscription_examen import IInscriptionExamenTranslator
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_periode_encodage_notes import \
IPeriodeEncodageNotesTranslator
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_signaletique_etudiant import \
ISignaletiqueEtudiantTranslator
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_signaletique_personne import \
ISignaletiquePersonneTranslator
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.periode_encodage_ouverte import PeriodeEncodageOuverte
from ddd.logic.encodage_des_notes.shared_kernel.repository.i_encoder_notes_rapport import IEncoderNotesRapportRepository
from ddd.logic.encodage_des_notes.soumission.repository.i_adresse_feuille_de_notes import \
IAdresseFeuilleDeNotesRepository
NouvelleNote = str
EmailEtudiant = str
def encoder_notes(
cmd: 'EncoderNotesCommand',
note_etudiant_repo: 'INoteEtudiantRepository',
periode_encodage_note_translator: 'IPeriodeEncodageNotesTranslator',
cohortes_gestionnaire_translator: 'ICohortesDuGestionnaire',
notification_encodage: 'INotifierEncodageNotes',
attribution_enseignant_translator: 'IAttributionEnseignantTranslator',
signaletique_personne_repo: 'ISignaletiquePersonneTranslator',
signaletique_etudiant_repo: 'ISignaletiqueEtudiantTranslator',
adresse_feuille_de_notes_repo: 'IAdresseFeuilleDeNotesRepository',
historiser_note_service: 'IHistoriserEncodageNotesService',
inscription_examen_translator: 'IInscriptionExamenTranslator',
rapport_repository: 'IEncoderNotesRapportRepository'
) -> List['IdentiteNoteEtudiant']:
# Given
PeriodeEncodageOuverte().verifier(periode_encodage_note_translator)
periode_ouverte = periode_encodage_note_translator.get()
gestionnaire_parcours = GestionnaireParcoursBuilder().get(
matricule_gestionnaire=cmd.matricule_fgs_gestionnaire,
annee_concernee=periode_ouverte.annee_concernee,
cohortes_gestionnaire_translator=cohortes_gestionnaire_translator,
)
cohortes_non_completes = CohorteAvecEncodageIncomplet().search(
[cmd_note.code_unite_enseignement for cmd_note in cmd.notes_encodees],
periode_ouverte.annee_concernee,
periode_ouverte.session_concernee,
note_etudiant_repo,
inscription_examen_translator,
)
# WHEN
rapport = EncoderNotesRapportBuilder.build_from_command(cmd)
notes = EncoderNotesEnLot().execute(
cmd.notes_encodees,
gestionnaire_parcours,
note_etudiant_repo,
periode_ouverte,
historiser_note_service,
inscription_examen_translator,
rapport,
rapport_repository,
notification_encodage,
cohortes_non_completes,
attribution_enseignant_translator,
signaletique_personne_repo,
signaletique_etudiant_repo,
adresse_feuille_de_notes_repo,
)
return notes
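# Invocation sketch (all collaborators below are hypothetical stand-ins for
# the repository/translator interfaces named in the signature; real
# implementations are provided by the infrastructure layer):
#
#   cmd = EncoderNotesCommand(
#       matricule_fgs_gestionnaire="00000001",
#       notes_encodees=[...],  # one command item per note, each carrying
#   )                          # at least a code_unite_enseignement
#   identites = encoder_notes(
#       cmd, note_repo, periode_translator, cohortes_translator,
#       notification, attribution_translator, signaletique_personne,
#       signaletique_etudiant, adresses_repo, historiser, inscriptions,
#       rapport_repo,
#   )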
| uclouvain/osis | ddd/logic/encodage_des_notes/encodage/use_case/write/encoder_notes_service.py | Python | agpl-3.0 | 5,601 |
#!/usr/bin/env python
# Copyright (c) 2015 Tobias Neumann, Philipp Rescheneder.
#
# This file is part of Slamdunk.
#
# Slamdunk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Slamdunk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#SPLASH:
#Systematic Performance evaLuAtion of Slamdunk beHaviour (accomplishment, achievement)
#########################################################################
# Main routine for the SLAMdunk simulation
#########################################################################
# Imports
#########################################################################
from __future__ import print_function
import sys, os, random
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os.path import basename
from joblib import Parallel, delayed
from slamdunk.dunks import simulator
from slamdunk.utils.misc import replaceExtension, removeExtension, SampleInfo
from slamdunk.version import __version__
from shutil import copyfile
########################################################################
# Global variables
########################################################################
printOnly = False
verbose = True
mainOutput = sys.stderr
logToMainOutput = False
########################################################################
# Routine definitions
########################################################################
def message(msg):
print(msg, file=mainOutput)
def createDir(directory):
if directory:
if not os.path.exists(directory):
message("Creating output directory: " + directory)
os.makedirs(directory)
def reads(outputDirectory, bed, sampleName, readLenght, readNumber, readCoverage, seqError, pulseTimePoint, chaseTimePoint, conversionRate, sampleInfo, labledTranscripots = -1.0):
message("Simulating read sample: " + sampleName)
bed12File = replaceExtension(bed, ".bed12")
bed12FastaFile = replaceExtension(bed, ".fa")
explvFile = replaceExtension(bed, ".eplv")
bedReads = os.path.join(outputDirectory, sampleName + "_reads_tmp.bed")
faReads = os.path.join(outputDirectory, sampleName + "_reads_tmp.fa")
totalUTRlength = simulator.getTotalUtrLength(bed12File)
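    # If no explicit read number was given, derive one from the target
    # coverage; either way the final count is jittered by +/-20% so that
    # replicate samples do not end up with identical depths.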
    if readNumber == 0:
readNumber = (totalUTRlength / readLenght) * readCoverage
readNumber = int(readNumber * (random.uniform(-0.2, 0.2) + 1))
#message("Simulating " + str(readNumber) + " reads with sequencing error of " + str(seqError))
simulator.simulateReads(bed12File, bed12FastaFile, explvFile, bedReads, faReads, readLenght, readNumber, seqError)
bamReadsWithTC = os.path.join(outputDirectory, sampleName + "_reads.bam")
utrSummary = os.path.join(outputDirectory, sampleName + "_utrsummary.tsv")
simulator.addTcConversions(bed, faReads, bamReadsWithTC, pulseTimePoint, chaseTimePoint, utrSummary, conversionRate, readNumber, sampleInfo, labledTranscripots)
os.unlink(faReads)
os.unlink(bedReads)
def run():
########################################################################
# Argument parsing
########################################################################
# TODO: parameter for simulating expression levels
# TODO: more realistic simulation of half lifes
# Info
usage = "SLAMdunk software for simulating SLAM-seq data"
# Main Parsers
parser = ArgumentParser(description=usage, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
# Initialize Subparsers
subparsers = parser.add_subparsers(help="", dest="command")
allparse = subparsers.add_parser('all', help='Simulated full SlamSeq samples')
allparse.add_argument("-r", "--reference", type=str, required=True, dest="referenceFile", help="Reference fasta file")
allparse.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
allparse.add_argument("-l", "--read-length", type=int, required=True, dest="readLength", help="All UTRs short than the read length are removed.")
allparse.add_argument("-o", "--outputDir", type=str, required=False, dest="outputDir", default=".", help="Output directory for mapped BAM files.")
allparse.add_argument("-s", "--snp-rate", type=float, required=False, default=0.001, dest="snpRate", help="SNP rate in UTRs")
allparse.add_argument("-cov", "--read-coverage", type=int, required=False, default=20, dest="readCoverage", help="Read coverage (if read number is not specified)")
allparse.add_argument("-e", "--sequencing-error", type=float, required=False, default=0.05, dest="seqError", help="Sequencing error")
allparse.add_argument("-p", "--pulse", type=str, required=False, dest="pulse", help="Pulse in minutes")
allparse.add_argument("-ra", "--rates", type=str, required=False, default=None, dest="rates", help="List of rates")
allparse.add_argument("-c", "--chase", type=str, required=False, default="", dest="chase", help="Chase in minutes")
allparse.add_argument("-tc", "--tc-rate", type=float, required=False, dest="conversionRate", default=0.024, help="T->C conversion rate")
allparse.add_argument("-minhl", "--min-halflife", type=int, required=False, default=30, dest="minHalfLife", help="Lower bound for the simulated half lifes in minutes")
allparse.add_argument("-maxhl", "--max-halflife", type=int, required=False, default=720, dest="maxHalfLife", help="Upper bound for the simulated half lifes in minutes")
allparse.add_argument("-t", "--threads", type=int, required=False, default=1, dest="threads", help="Thread number")
allparse.add_argument("-rep", "--replicates", type=int, required=False, default=1, dest="replicates", help="Number of replicates")
allparse.add_argument('-st', "--skip-turnover", required=False, dest="skipTurnover", action='store_true', help="Take half-life from score filed of input BED file")
preparebedparse = subparsers.add_parser('preparebed', help='Prepares a UTR BED file for SlamSim')
preparebedparse.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
preparebedparse.add_argument("-l", "--read-length", type=int, required=True, dest="readLength", help="All UTRs short than the read length are removed.")
preparebedparse.add_argument("-o", "--outputDir", type=str, required=False, dest="outputDir", default=".", help="Output directory for mapped BAM files.")
turnoverparse = subparsers.add_parser('turnover', help='Simulate utrs and turnover rate')
turnoverparse.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
turnoverparse.add_argument("-minhl", "--min-halflife", type=int, required=False, default=30, dest="minHalfLife", help="Lower bound for the simulated half lifes in minutes")
turnoverparse.add_argument("-maxhl", "--max-halflife", type=int, required=False, default=720, dest="maxHalfLife", help="Upper bound for the simulated half lifes in minutes")
turnoverparse.add_argument("-o", "--outputDir", type=str, required=False, dest="outputDir", default=".", help="Output directory for mapped BAM files.")
utrsparse = subparsers.add_parser('utrs', help='Simulate utrs and turnover rate')
utrsparse.add_argument("-r", "--reference", type=str, required=True, dest="referenceFile", help="Reference fasta file")
utrsparse.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
utrsparse.add_argument("-l", "--read-length", type=int, required=True, dest="readLength", help="Read length")
utrsparse.add_argument("-o", "--outputDir", type=str, required=False, dest="outputDir", default=".", help="Output directory for mapped BAM files.")
utrsparse.add_argument("-s", "--snp-rate", type=float, required=False, default=0.001, dest="snpRate", help="SNP rate in UTRs")
simulateparse = subparsers.add_parser('reads', help='Simulate SLAM-seq read data')
simulateparse.add_argument("-o", "--outputDir", type=str, required=False, dest="outputDir", default=".", help="Output directory for mapped BAM files.")
simulateparse.add_argument("--sample-name", type=str, required=True, dest="sampleName", help="Name of sample")
simulateparse.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
simulateparse.add_argument("-l", "--read-length", type=int, required=True, dest="readLength", help="Read length")
simulateparse.add_argument("-n", "--read-number", type=int, required=False, default=0, dest="readNumber", help="Number of reads to simulate")
simulateparse.add_argument("-cov", "--read-coverage", type=int, required=False, default=20, dest="readCoverage", help="Read coverage (if read number is not specified)")
simulateparse.add_argument("-e", "--sequencing-error", type=float, required=False, default=0.05, dest="seqError", help="Sequencing error")
simulateparse.add_argument("-p", "--pulse", type=int, required=True, dest="pulse", help="Pulse in minutes")
simulateparse.add_argument("-c", "--chase", type=int, required=False, default=0, dest="chase", help="Chase in minutes")
simulateparse.add_argument("-tc", "--tc-rate", type=float, required=False, dest="conversionRate", default=0.024, help="T->C conversion rate")
evalparser = subparsers.add_parser('eval-counts', help='Evaluate count files')
evalparser.add_argument("-s", "--simulated", type=str, required=True, dest="simulated", help="")
evalparser.add_argument("-d", "--slamdun", type=str, required=True, dest="slamdunk", help="")
evalparser.add_argument("-o", "--outputFile", type=str, required=True, dest="outputFile", help="")
evalreadsparser = subparsers.add_parser('eval-reads', help='Evaluate read files')
evalreadsparser.add_argument("-o", "--outputFile", type=str, required=True, dest="outputFile", help="")
evalreadsparser.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
evalreadsparser.add_argument("-r", "--reference", type=str, required=True, dest="referenceFile", help="Reference fasta file")
evalreadsparser.add_argument('bam', action='store', help='Bam file(s)' , nargs="+")
evalconversionplotparse = subparsers.add_parser('plot.conversions', help='Plots differences in simulated and found conversion rates')
evalconversionplotparse.add_argument("-sim", "--simDir", type=str, required=True, dest="simDir", help="")
evalconversionplotparse.add_argument("-slam", "--slamdunkDir", type=str, required=True, dest="slamDir", help="")
evalconversionplotparse.add_argument("-o", "--outputFile", type=str, required=True, dest="outputFile", help="")
evalconversionplotparse.add_argument("-tc", "--tc-rate", type=float, required=False, dest="conversionRate", default=0.03, help="T->C conversion rate")
evalhalflifeparse = subparsers.add_parser('plot.halflifes', help='Plots half lifes')
evalhalflifeparse.add_argument("-sim", "--simulated-hl", type=str, required=True, dest="simHL", help="Simulated half-lifes")
evalhalflifeparse.add_argument("-pred", "--predicted-hl", type=str, required=True, dest="predHL", help="Predicted half-lifes")
evalhalflifeparse.add_argument("-true", "--true-hl", type=str, required=True, dest="trueHL", help="Predicted half-lifes")
evalhalflifeparse.add_argument("-o", "--outputFile", type=str, required=True, dest="outputFile", help="")
evalhalflifeparse.add_argument("-e", "--erroroutputFile", type=str, required=True, dest="erroutputFile", help="")
evalhalflifeplotparse = subparsers.add_parser('plot.halflifespergene', help='Plots half lifes')
evalhalflifeplotparse.add_argument("-sim", "--simDir", type=str, required=True, dest="simDir", help="")
evalhalflifeplotparse.add_argument("-slam", "--slamdunkDir", type=str, required=True, dest="slamDir", help="")
evalhalflifeplotparse.add_argument("-t", "--timepoints", type=str, required=True, dest="timepoints", help="")
evalhalflifeplotparse.add_argument("-o", "--outputFile", type=str, required=True, dest="outputFile", help="")
evalhalflifeplotparse.add_argument("-tc", "--tc-rate", type=float, required=False, dest="conversionRate", default=0.03, help="T->C conversion rate")
evalhalflifeplotparse.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
utilcrateparse = subparsers.add_parser('util.conversionrate', help='Get conversion rate from mapped BAM files')
utilcrateparse.add_argument('bam', action='store', help='Bam file(s)' , nargs="+")
utilcrateparse.add_argument("-r", "--reference", type=str, required=True, dest="referenceFile", help="Reference fasta file")
utilcrateparse.add_argument("-region", "--region", type=str, required=True, dest="region", help="")
utilcrateparse.add_argument('-rev',required=False, dest="reverse", action='store_true')
args = parser.parse_args()
########################################################################
# Routine selection
########################################################################
def prepareBed(outputDirectory, bed, readLength):
createDir(outputDirectory)
slamSimBed = os.path.join(outputDirectory, replaceExtension(basename(bed), ".bed", "_original"))
simulator.prepareBED(bed, slamSimBed, readLength)
def turnOver(outputDirectory, bed, minHalflife, maxHalfLife, skipTurnover=False):
message("Simulating turnover")
createDir(outputDirectory)
trunoverBed = os.path.join(outputDirectory, replaceExtension(basename(bed), ".bed", "_utrs"))
if not skipTurnover:
simulator.simulateTurnOver(bed, trunoverBed, minHalflife, maxHalfLife)
else:
copyfile(bed, trunoverBed)
def Utrs(outputDirectory, bed, referenceFasta, readLength, polyALength, snpRate):
message("Simulating UTRs")
createDir(outputDirectory)
bed12 = os.path.join(outputDirectory, replaceExtension(basename(bed), ".bed12", "_utrs"))
bed12Fasta = os.path.join(outputDirectory, replaceExtension(basename(bed), ".fa", "_utrs"))
explv = os.path.join(outputDirectory, replaceExtension(basename(bed), ".eplv", "_utrs"))
vcfFile = os.path.join(outputDirectory, replaceExtension(basename(bed), ".vcf", "_utrs"))
totalUTRlength = simulator.prepareUTRs(bed, bed12, bed12Fasta, referenceFasta, readLength, polyALength, explv, snpRate, vcfFile)
command = args.command
if (command == "preparebed") :
prepareBed(args.outputDir, args.bed, args.readLength)
elif (command == "turnover"):
turnOver(args.outputDir, args.bed, args.minHalfLife, args.maxHalfLife)
elif (command == "utrs") :
polyALength = 0
Utrs(args.outputDir, args.bed, args.referenceFile, args.readLength, polyALength, args.snpRate)
elif (command == "reads") :
createDir(args.outputDir)
        # reads() requires a SampleInfo describing the sample being simulated.
        sample_info = SampleInfo(ID=1, Name=args.sampleName, Type="pulse", Time=str(args.pulse))
        reads(args.outputDir, args.bed, args.sampleName, args.readLength, args.readNumber, args.readCoverage, args.seqError, args.pulse, args.chase, args.conversionRate, sample_info)
elif (command == "eval-counts") :
outputPath = os.path.dirname(args.outputFile)
createDir(outputPath)
simulator.evaluate(args.simulated, args.slamdunk, args.outputFile, mainOutput)
elif (command == "eval-reads") :
outputPath = os.path.dirname(args.outputFile)
createDir(outputPath)
for bam in args.bam:
simulator.evaluateReads(bam, args.referenceFile, args.bed, args.outputFile, mainOutput)
elif (command == "plot.conversions") :
simDir = args.simDir
slamDir = args.slamDir
outputPDF = args.outputFile
conversionRate = args.conversionRate
outputPath = os.path.dirname(outputPDF)
createDir(outputPath)
simulator.plotconversiondifferences(simDir, slamDir, conversionRate, outputPDF)
elif (command == "plot.halflifespergene") :
bed = args.bed
simDir = args.simDir
slamDir = args.slamDir
outputPDF = args.outputFile
conversionRate = args.conversionRate
timePoints = args.timepoints
outputPath = os.path.dirname(outputPDF)
createDir(outputPath)
simulator.plotHalfLifes(bed, simDir, slamDir, timePoints, conversionRate, outputPDF)
elif (command == "plot.halflifes") :
trueHLFile = args.trueHL
simHLFile = args.simHL
predHLFile = args.predHL
outputPDF = args.outputFile
erroutputCSV = args.erroutputFile
simulator.evalHalfLifes(trueHLFile, simHLFile, predHLFile, outputPDF, erroutputCSV)
elif (command == "util.conversionrate") :
ref = args.referenceFile
bams = args.bam
region = args.region
region = region.replace(",", "")
chromosome = region.split(":")[0]
start = int(region.split(":")[1].split("-")[0])
end = int(region.split(":")[1].split("-")[1])
strand = "+"
        if args.reverse:
strand = "-"
for bam in bams:
simulator.getConversionRateFromBam(bam, ref, chromosome, start, end, strand)
elif (command == "all") :
#args.outputDir, args.bed, args.sampleName, args.readLength, args.readNumber, args.readCoverage, args.seqError, args.pulse, args.chase, args.conversionRate
referenceFile = args.referenceFile
baseFolder = args.outputDir
annotationFile = args.bed
readLength = args.readLength
readCoverage = args.readCoverage
sequencingError = args.seqError
polyALength = 0
#timePoints = [0, 15, 30, 60, 180, 360, 720, 1440]
        if args.pulse is not None:
            timePoints = args.pulse.split(",")
chaseTimePoints = []
if len(args.chase) > 0:
chaseTimePoints = args.chase.split(",")
labledTranscripots = None
        if args.rates is not None:
            labledTranscripots = args.rates.split(",")
replicates = args.replicates
n = args.threads
createDir(baseFolder)
annotationPrefix = removeExtension(basename(annotationFile))
simulatedAnnotationPref = os.path.join(baseFolder, annotationPrefix)
prepareBed(baseFolder, annotationFile, readLength)
# TODO parameter to skip this
turnOver(baseFolder, simulatedAnnotationPref + "_original.bed", args.minHalfLife, args.maxHalfLife, args.skipTurnover)
Utrs(baseFolder, simulatedAnnotationPref + "_original.bed", referenceFile, readLength, polyALength, args.snpRate)
sampleFile = open(os.path.join(baseFolder, "samples.tsv"), "w")
sampleNumber = 1
jobs = []
        if labledTranscripots is None:
for timePoint in timePoints:
for replicate in range(1, replicates + 1):
sampleName = "sample_" + str(sampleNumber) + "_pulse_" + str(timePoint) + "min_rep" + str(replicate)
sampleInfo = SampleInfo(ID = sampleNumber, Name = sampleName, Type = "pulse", Time = str(timePoint))
jobs.append(delayed(reads)(baseFolder,
simulatedAnnotationPref + "_original_utrs.bed",
sampleName,
readLength, 0, readCoverage, sequencingError,
int(timePoint), 0, args.conversionRate, sampleInfo))
sampleNumber += 1
print(os.path.join(baseFolder, sampleName + "_reads.bam"), sampleName, "pulse", timePoint, sep="\t", file=sampleFile)
for timePoint in chaseTimePoints:
for replicate in range(1, replicates + 1):
sampleName = "sample_" + str(sampleNumber) + "_chase_" + str(timePoint) + "min_rep" + str(replicate)
sampleInfo = SampleInfo(ID = sampleNumber, Name = sampleName, Type = "chase", Time = str(timePoint))
jobs.append(delayed(reads)(baseFolder,
simulatedAnnotationPref + "_original_utrs.bed",
sampleName,
readLength, 0, readCoverage, sequencingError,
int(timePoints[-1]), int(timePoint), args.conversionRate, sampleInfo))
sampleNumber += 1
print(os.path.join(baseFolder, sampleName + "_reads.bam"), sampleName, "chase", timePoint, sep="\t", file=sampleFile)
else:
for rate in labledTranscripots:
for replicate in range(1, replicates + 1):
sampleName = "sample_" + str(sampleNumber) + "_rate_" + str(rate) + "_rep" + str(replicate)
sampleInfo = SampleInfo(ID = sampleNumber, Name = sampleName, Type = "rate", Time = str(rate))
jobs.append(delayed(reads)(baseFolder,
simulatedAnnotationPref + "_original_utrs.bed",
sampleName,
readLength, 0, readCoverage, sequencingError,
0, 0, args.conversionRate, sampleInfo, float(rate)))
sampleNumber += 1
print(os.path.join(baseFolder, sampleName + "_reads.bam"), sampleName, "rate", rate, sep="\t", file=sampleFile)
sampleFile.close()
results = Parallel(n_jobs=n, verbose=False)(jobs)
else:
parser.error("Too few arguments.")
if __name__ == '__main__':
run()
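# Example end-to-end run (file names and parameter values are hypothetical):
#
#   python splash.py all -r genome.fa -b utrs.bed -l 50 -o sim_out \
#       -p 15,30,60 -rep 2 -tc 0.024
#
# This prepares the BED file, simulates turnover and UTRs, and writes one
# read sample per pulse time point and replicate, plus a samples.tsv index.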
| t-neumann/slamdunk | slamdunk/splash.py | Python | agpl-3.0 | 22,085 |
# -*- coding: utf-8 -*-
"""
spyderplugins is a **namespace package** for spyder plugins
"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
| martindurant/conda-manager | spyplugins/__init__.py | Python | mit | 156 |
class KeyGenerator(object):
@staticmethod
def get_lens_key(lens_details):
return "{}_{}".format(lens_details['name'], lens_details['lenstype'])
@staticmethod
def get_model_key(model_details):
return "{}_{}_{}".format(model_details['style'], model_details['name'], model_details['sku'])
| stevekew/oakleydb | infra/oakleydb/keygenerator.py | Python | mpl-2.0 | 320 |
#coding=utf-8
from flask import render_template, redirect, request, url_for, flash,abort
from sqlalchemy import and_,desc,or_
from . import database
from ..models import dbs,user_db,users
from .forms import DBAddForm
from app.main.forms import SearchForm
from flask.ext.login import login_required, current_user
from config import Config, basedir
from .. import db
@database.route('/dblist',methods=['GET','POST'])
@database.route('/dblist/<int:page>', methods=['GET', 'POST'])
@login_required
def dblist(page=1):
if request.method == 'POST':
operation = request.form['operation']
if operation == 'search':
search_txt = request.form['search_text'].encode('utf8')
if current_user.role == '0':
db_all = dbs.query.filter(or_(dbs.name.ilike('%%%s%%' %search_txt),dbs.dbname.ilike('%%%s%%' %search_txt))).paginate(page,Config.POSTS_PER_PAGE,False)
else:
user_db_list = db.session.query(user_db).filter_by(user_id=current_user.id).all()
db_id_list = []
for db_id in user_db_list:
db_id_list.append(db_id[1])
db_all = dbs.query.filter(and_(dbs.id.in_(db_id_list),or_(dbs.name.ilike('%%%s%%' %search_txt),dbs.dbname.ilike('%%%s%%' %search_txt)))).paginate(page,Config.POSTS_PER_PAGE,False)
return render_template('dblist.html',db_all=db_all,username=current_user.username)
elif operation =='db_delete':
if current_user.role == '0':
db_id = request.form['db_id']
db_item = dbs.query.filter_by(id=db_id).first()
db.session.delete(db_item)
db.session.commit()
                return u'Database deleted successfully!'
else:
                return u'Permission denied!'
else:
if current_user.role == '0':
db_all = dbs.query.paginate(page, Config.POSTS_PER_PAGE, False)
else:
user_db_list = db.session.query(user_db).filter_by(user_id=current_user.id).all()
db_id_list = []
for db_id in user_db_list:
db_id_list.append(db_id[1])
db_all = dbs.query.filter(dbs.id.in_(db_id_list)).paginate(page,Config.POSTS_PER_PAGE,False)
return render_template('dblist.html',db_all=db_all,username=current_user.username)
@database.route('/userdb/<int:userid>', methods=['GET','POST'])
@login_required
def userdb(userid):
if request.method == 'POST':
try:
if current_user.role == '0':
user = users.query.filter_by(id=userid).first()
db_list = request.form['db_list'].strip()
db_obj_list = []
if not db_list == '':
db_list = db_list.split(',')
for db_id in db_list:
db_obj = dbs.query.filter_by(id=db_id).first()
db_obj_list.append(db_obj)
user.dbs = db_obj_list
db.session.add(user)
db.session.commit()
                return u'Authorization updated successfully!'
else:
                return u'Operation not permitted!'
except Exception, e:
print e
            return u'Failed to update authorization!'
else:
User_dbs = users.query.filter_by(id=userid).first().dbs
dbs_all = dbs.query.order_by(dbs.id.desc()).all()
for db_item in dbs_all:
if db_item in User_dbs:
dbs_all.remove(db_item)
return render_template('user_db.html',User_db=User_dbs,All_db=dbs_all)
@database.route('/dbadd', methods=['GET','POST'])
@login_required
def dbadd():
form = DBAddForm()
if form.validate_on_submit():
db_item = dbs(name=form.name.data,
dburl=form.dburl.data,
dbname=form.dbname.data,
dbuser=form.dbname.data,
dbpass=form.dbpass.data,
node=form.node.data)
db.session.add(db_item)
db.session.commit()
        flash(u'Database %s added successfully!' % form.name.data)
return render_template('dbadd.html',form=form)
else:
return render_template('dbadd.html',form=form)
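# Route summary for this blueprint (registered above):
#   /dblist and /dblist/<int:page>  -- list databases, with search and delete
#   /userdb/<int:userid>            -- view and update a user's database grants
#   /dbadd                          -- add a new database entry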
| linuxyan/opsmanage | app/database/views.py | Python | apache-2.0 | 4,214 |
# importing fluxi
from fluxi.fluxi import Fluxi
#initializing a whole Qt application with
#a window, movable docks, parameter explorer, debug output window and logging
#is so easy
fl=Fluxi("Example")
#%%
print fl._callInMainThread("_test",1,2,x="a")
#%%
from fluxis_misc import ExecInThread
def test(thr):
print thr
print fl.C("A woop Chart ohoh")
thr=ExecInThread(action=test)
#%%
fl.drawtimer.stop()
 | fluxiresearch/fluxi | devtests/test_thread.py | Python | mit | 424 |
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.linear_model import LogisticRegression as LGR
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.ensemble import ExtraTreesClassifier as ET
from xgboost_multi import XGBC
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import log_loss
import numpy as np
import pandas as pd
import pickle
# create model_list
def get_model_list():
model_list = []
for num_round in [50]:
for max_depth in [30]:
for eta in [0.25]:
for min_child_weight in [1]:
for col_sample in [1]:
model_list.append((XGBC(num_round = num_round, max_depth = max_depth, eta = eta,
min_child_weight = min_child_weight, colsample_bytree = col_sample),
'xgb_tree_%i_depth_%i_lr_%f_child_%i_col_sample_%i'%(num_round, max_depth, eta, min_child_weight,col_sample)))
return model_list
def gen_data():
# the 4k features!
the_train = np.hstack((pickle.load(open('X33_train_reproduce.p','rb')),pickle.load(open('Xcode_train.p','rb'))[:,:800]))
the_test = np.hstack((pickle.load(open('X33_test_reproduce.p','rb')),pickle.load(open('Xcode_test.p','rb'))[:,:800]))
# corresponding id and labels
Id = pickle.load(open('xid.p','rb'))
labels = pickle.load(open('y.p','rb'))
Id_test = pickle.load(open('Xt_id.p','rb'))
# merge them into pandas
join_train = np.column_stack((Id, the_train, labels))
join_test = np.column_stack((Id_test, the_test))
train = pd.DataFrame(join_train, columns=['Id']+['the_fea%i'%i for i in xrange(the_train.shape[1])] + ['Class'])
test = pd.DataFrame(join_test, columns=['Id']+['the_fea%i'%i for i in xrange(the_train.shape[1])])
del join_train, join_test
# convert into numeric features
train = train.convert_objects(convert_numeric=True)
test = test.convert_objects(convert_numeric=True)
train_image = pd.read_csv("train_asm_image.csv", usecols=['Id']+['asm_%i'%i for i in xrange(800)])
test_image = pd.read_csv("test_asm_image.csv", usecols=['Id']+['asm_%i'%i for i in xrange(800)])
train = pd.merge(train, train_image, on='Id')
test = pd.merge(test, test_image, on='Id')
print "the data dimension:"
print train.shape, test.shape
return train, test
def gen_submission(model):
# read in data
print "read data and prepare modelling..."
train, test = gen_data()
X = train
Id = X.Id
labels = np.array(X.Class - 1) # for the purpose of using multilogloss fun.
del X['Id']
del X['Class']
X = X.as_matrix()
X_test = test
id_test = X_test.Id
del X_test['Id']
X_test = X_test.as_matrix()
clf, clf_name = model
print "generating model %s..."%clf_name
clf.fit(X, labels)
pred = clf.predict_proba(X_test)
pred=pred.reshape(X_test.shape[0],9)
pred = np.column_stack((id_test, pred))
submission = pd.DataFrame(pred, columns=['Id']+['Prediction%i'%i for i in xrange(1,10)])
submission = submission.convert_objects(convert_numeric=True)
submission.to_csv('model2.csv',index = False)
def cross_validate(model_list):
# read in data
print "read data and prepare modelling..."
train, test = gen_data()
X = train
Id = X.Id
labels = np.array(X.Class - 1) # for the purpose of using multilogloss fun.
del X['Id']
del X['Class']
X = X.as_matrix()
X_test = test
id_test = X_test.Id
del X_test['Id']
X_test = X_test.as_matrix()
kf = KFold(labels, n_folds=4) # 4 folds
best_score = 1.0
for j, (clf, clf_name) in enumerate(model_list):
print "modelling %s"%clf_name
stack_train = np.zeros((len(Id),9)) # 9 classes.
for i, (train_fold, validate) in enumerate(kf):
X_train, X_validate, labels_train, labels_validate = X[train_fold,:], X[validate,:], labels[train_fold], labels[validate]
clf.fit(X_train,labels_train)
stack_train[validate] = clf.predict_proba(X_validate)
print multiclass_log_loss(labels, stack_train)
if multiclass_log_loss(labels, stack_train) < best_score:
best_score = multiclass_log_loss(labels, stack_train)
best_selection = j
return model_list[best_selection]
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
"""Multi class version of Logarithmic Loss metric.
https://www.kaggle.com/wiki/MultiClassLogLoss
Parameters
----------
y_true : array, shape = [n_samples]
        true class, integers in [0, n_classes - 1)
y_pred : array, shape = [n_samples, n_classes]
Returns
-------
loss : float
"""
predictions = np.clip(y_pred, eps, 1 - eps)
# normalize row sums to 1
predictions /= predictions.sum(axis=1)[:, np.newaxis]
actual = np.zeros(y_pred.shape)
n_samples = actual.shape[0]
actual[np.arange(n_samples), y_true.astype(int)] = 1
vectsum = np.sum(actual * np.log(predictions))
loss = -1.0 / n_samples * vectsum
return loss
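# Sanity check (hypothetical values): confident correct predictions give a
# loss near 0, while a uniform prediction over k classes gives log(k).
#
#   y_true = np.array([0, 1])
#   y_pred = np.array([[0.9, 0.1], [0.1, 0.9]])
#   multiclass_log_loss(y_true, y_pred)  # ~= -log(0.9) ~= 0.105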
if __name__ == '__main__':
model_list = get_model_list()
#best_model = cross_validate(model_list)
gen_submission(model_list[0])#0.0051
print "ALL DONE!!!"
| bikash/kaggleCompetition | microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/model2.py | Python | apache-2.0 | 5,409 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo.config import cfg
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Quantum plugin provider module')),
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Quantum quota driver class')),
]
_db_opts = [
cfg.StrOpt('sql_connection',
default='',
help=_('URL to database')),
]
CONF = cfg.ConfigOpts()
CONF.register_opts(_core_opts)
CONF.register_opts(_db_opts, 'DATABASE')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError, e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
if CONF.command.delta:
sign = '+' if CONF.command.name == 'upgrade' else '-'
revision = sign + str(CONF.command.delta)
else:
revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
for name in ['upgrade', 'downgrade']:
parser = subparsers.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade_downgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'quantum.db.migration:alembic_migrations')
# attach the Quantum conf to the Alembic conf
config.quantum_config = CONF
CONF()
CONF.command.func(config, CONF.command.name)
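# Typical invocations (the console-script name and config path are
# illustrative; packaging normally exposes main() as an entry point):
#
#   quantum-db-manage --config-file /etc/quantum/quantum.conf upgrade head
#   quantum-db-manage --config-file /etc/quantum/quantum.conf revision \
#       -m "description of change" --autogenerate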
| tpaszkowski/quantum | quantum/db/migration/cli.py | Python | apache-2.0 | 4,201 |
import unittest
import mock
import logging
import datetime
import time
import esgfpid.rabbit.asynchronous
from esgfpid.rabbit.asynchronous.exceptions import OperationNotAllowed
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
# Test resources:
from resources.TESTVALUES import *
import resources.TESTVALUES as TESTHELPERS
import globalvar
if globalvar.QUICK_ONLY:
print('Skipping slow tests in module "%s".' % __name__)
class RabbitAsynConnectorTestCase(unittest.TestCase):
slow_message = '\nRunning a slow test (avoid by using -ls flag).'
def setUp(self):
LOGGER.info('######## Next test (%s) ##########', __name__)
def tearDown(self):
LOGGER.info('#############################')
def assert_messages_are_in_queue(self, msg_queue, list_of_messages):
queue_content = []
while not msg_queue.empty():
queue_content.append(msg_queue.get(False))
for msg in list_of_messages:
self.assertIn(msg, queue_content)
#
# Init
#
'''
Test whether instances of rabbitconnector and thread
are created.
'''
def test_init_ok(self):
# Test variables:
nodemanager = TESTHELPERS.get_nodemanager()
# Run code to be tested:
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
# Check result:
self.assertIsInstance(testrabbit, esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector, 'Constructor fail.')
self.assertTrue(testrabbit._AsynchronousRabbitConnector__not_started_yet)
thread = testrabbit._AsynchronousRabbitConnector__thread
self.assertIsInstance(thread, esgfpid.rabbit.asynchronous.rabbitthread.RabbitThread, 'Constructor fail.')
#
# Start thread
#
'''
Test whether the start method does the necessary things.
We expect it to change the state, and to run the thread.
'''
@unittest.skipIf(globalvar.QUICK_ONLY, '(this test is slow)')
def test_start_thread_ok(self):
print(self.slow_message)
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
# Mock the builder (so it cannot start a connection):
def side_effect_first_connection():
LOGGER.debug('Pretending to do something in the thread.run()')
time.sleep(0.5)
LOGGER.debug('Finished pretending to do something in the thread.run()')
buildermock = mock.MagicMock()
buildermock.first_connection = mock.MagicMock()
buildermock.first_connection.side_effect = side_effect_first_connection
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__builder = buildermock
# Check preconditions:
self.assertTrue(testrabbit._AsynchronousRabbitConnector__not_started_yet)
self.assertTrue(testrabbit._AsynchronousRabbitConnector__statemachine.is_NOT_STARTED_YET())
# Run code to be tested:
# This runs the thread, which triggers building a connection.
# In this test, it calls the side effect defined above, which blocks for
# a second. Afterwards, the thread should be finished.
testrabbit.start_rabbit_thread()
# Check results:
# Check if thread is alive:
self.assertTrue(testrabbit._AsynchronousRabbitConnector__thread.is_alive())
# Join the thread...
print("Joining...")
testrabbit._AsynchronousRabbitConnector__thread.join()
print("Joining done...")
# Check if the thread has ended:
self.assertFalse(testrabbit._AsynchronousRabbitConnector__thread.is_alive())
# Check state:
self.assertFalse(testrabbit._AsynchronousRabbitConnector__not_started_yet)
self.assertTrue(testrabbit._AsynchronousRabbitConnector__statemachine.is_WAITING_TO_BE_AVAILABLE())
# Check if run was called:
buildermock.first_connection.assert_called()
#
# Sending messages
#
'''
Test behaviour when we try sending messages but the
thread was not started yet.
It should raise an exception.
'''
def test_send_message_not_started_yet(self):
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
self.assertTrue(testrabbit._AsynchronousRabbitConnector__not_started_yet)
# Run code to be tested:
with self.assertRaises(OperationNotAllowed):
testrabbit.send_message_to_queue('message-foo')
with self.assertRaises(OperationNotAllowed):
testrabbit.send_many_messages_to_queue(['a','b','c'])
'''
    Test behaviour when we try sending messages while the started flag
    is cleared but the state machine still reports NOT_STARTED_YET.
It should raise an exception.
'''
def test_send_message_not_started_yet_2(self):
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__not_started_yet = False
self.assertFalse(testrabbit._AsynchronousRabbitConnector__not_started_yet)
self.assertTrue(testrabbit._AsynchronousRabbitConnector__statemachine.is_NOT_STARTED_YET())
# Run code to be tested:
with self.assertRaises(OperationNotAllowed):
testrabbit.send_message_to_queue('message-foo')
with self.assertRaises(OperationNotAllowed):
testrabbit.send_many_messages_to_queue(['a','b','c'])
'''
Test behaviour when we send messages when the thread
was properly started.
We expect the message to be put into the queue.
We expect the publish event to be handed by the connection
to the feeder module.
'''
def test_send_message_ok(self):
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_available()
testrabbit._AsynchronousRabbitConnector__not_started_yet = False
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Mock the feeder (it has to receive the publish event):
feedermock = testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__feeder = mock.MagicMock()
# Run code to be tested:
testrabbit.send_message_to_queue('foo')
testrabbit.send_many_messages_to_queue(['a','b','c'])
# Check that publish was called:
feedermock.publish_message.assert_called()
self.assertTrue(feedermock.publish_message.call_count>=4)
# Check that the four messages were put into the queue:
msg_queue = testrabbit._AsynchronousRabbitConnector__unpublished_messages_queue
self.assert_messages_are_in_queue(msg_queue, ['foo', 'a', 'b', 'c'])
def test_send_message_waiting(self):
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_waiting_to_be_available()
testrabbit._AsynchronousRabbitConnector__not_started_yet = False
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Mock the feeder (it has to receive the publish event):
feedermock = testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__feeder = mock.MagicMock()
# Run code to be tested:
testrabbit.send_many_messages_to_queue(['a','b','c'])
testrabbit.send_message_to_queue('foo')
# Check that publish was NOT called:
feedermock.publish_message.assert_not_called()
# Check that the four messages were put into the queue:
msg_queue = testrabbit._AsynchronousRabbitConnector__unpublished_messages_queue
self.assert_messages_are_in_queue(msg_queue, ['foo', 'a', 'b', 'c'])
def test_send_message_unavail(self):
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_permanently_unavailable()
testrabbit._AsynchronousRabbitConnector__not_started_yet = False
# Mock the feeder (it has to receive the publish event):
feedermock = testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__feeder = mock.MagicMock()
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Run code to be tested:
testrabbit.send_message_to_queue('foo')
testrabbit.send_many_messages_to_queue(['a','b','c'])
# Check that publish was NOT called:
feedermock.publish_message.assert_not_called()
# Check that the four messages were NOT put into the queue:
msg_queue = testrabbit._AsynchronousRabbitConnector__unpublished_messages_queue
self.assertTrue(msg_queue.empty())
def test_send_message_user_closed(self):
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_permanently_unavailable()
testrabbit._AsynchronousRabbitConnector__statemachine.set_detail_closed_by_publisher()
testrabbit._AsynchronousRabbitConnector__not_started_yet = False
# Mock the feeder (it has to receive the publish event):
feedermock = testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__feeder = mock.MagicMock()
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Run code to be tested:
with self.assertRaises(OperationNotAllowed):
testrabbit.send_message_to_queue('foo')
with self.assertRaises(OperationNotAllowed):
testrabbit.send_many_messages_to_queue(['a','b','c'])
# Check that publish was NOT called:
feedermock.publish_message.assert_not_called()
# Check that the four messages were NOT put into the queue:
msg_queue = testrabbit._AsynchronousRabbitConnector__unpublished_messages_queue
self.assertTrue(msg_queue.empty())
#
# Gently finish
#
def test_gently_finish_ok(self):
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_available()
# Mock the wait-event, otherwise the library blocks, because the
# wait event would be unblocked in a patched function...
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__gently_finish_ready = wait_event_mock = mock.MagicMock()
# Mock the join function, as it cannot join if the thread was not started.
testrabbit._AsynchronousRabbitConnector__thread.join = joinmock = mock.MagicMock()
# Mock shutter (it has to receive the close event)
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__shutter = shuttermock = mock.MagicMock()
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Run code to be tested:
testrabbit.finish_rabbit_thread()
# Check result
shuttermock.finish_gently.assert_called()
wait_event_mock.wait.assert_called()
joinmock.assert_called()
#
# Force finish
#
def test_force_finish_ok(self):
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_available()
# Mock the join function, as it cannot join if the thread was not started.
testrabbit._AsynchronousRabbitConnector__thread.join = joinmock = mock.MagicMock()
# Mock shutter (it has to receive the close event)
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__shutter = shuttermock = mock.MagicMock()
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Run code to be tested:
testrabbit.force_finish_rabbit_thread()
# Check result
shuttermock.force_finish.assert_called()
joinmock.assert_called()
@unittest.skipIf(globalvar.QUICK_ONLY, '(this test is slow)')
def test_force_finish_join_fails(self):
print(self.slow_message)
# Preparations
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_available()
# Mock the join function, as it cannot join if the thread was not started.
testrabbit._AsynchronousRabbitConnector__thread.join = joinmock = mock.MagicMock()
testrabbit._AsynchronousRabbitConnector__thread.is_alive = alivemock = mock.MagicMock()
alivemock.side_effect = [True,False,False,False,False,False,False]
# Mock shutter (it has to receive the close event)
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__shutter = shuttermock = mock.MagicMock()
# Mock the connection (it has to hand the event over to the feeder mock):
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
# Run code to be tested:
testrabbit.force_finish_rabbit_thread()
# Check result
shuttermock.force_finish.assert_called()
joinmock.assert_called()
#
    # Odds and ends
#
def test_is_finished(self):
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
# Mock thread.is_alive and test getter for it:
thread = testrabbit._AsynchronousRabbitConnector__thread
thread.is_alive = mock.MagicMock()
thread.is_alive.return_value = True
self.assertFalse(testrabbit.is_finished())
thread.is_alive.return_value = False
self.assertTrue(testrabbit.is_finished())
def test_tell_stop_waiting(self):
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__gently_finish_ready = mock.MagicMock()
# Run code to be tested:
testrabbit._AsynchronousRabbitConnector__thread.tell_publisher_to_stop_waiting_for_gentle_finish()
# Check result
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__gently_finish_ready.set.assert_called()
def test_unblock_events(self):
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
# Run code to be tested:
testrabbit._AsynchronousRabbitConnector__thread.unblock_events()
        # We cannot really verify the result here: blocking the events just to
        # unblock them again would require starting a real thread.
def test_wait_for_connection(self):
# Preparation:
nodemanager = TESTHELPERS.get_nodemanager()
testrabbit = esgfpid.rabbit.asynchronous.AsynchronousRabbitConnector(nodemanager)
testrabbit._AsynchronousRabbitConnector__statemachine.set_to_available()
testrabbit._AsynchronousRabbitConnector__not_started_yet = False
# Mock the feeder (it has to receive the publish event):
feedermock = testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__feeder = mock.MagicMock()
# Mock event...
# So when the synchronisation is done, a connection is there...
# When the event is called, we mock the connection (it has to hand the event over to the feeder mock):
def sync_side_effect():
connectionmock = testrabbit._AsynchronousRabbitConnector__thread._connection = TESTHELPERS.get_connection_mock()
sync_event_mock = mock.MagicMock()
sync_event_mock.wait.side_effect = sync_side_effect
testrabbit._AsynchronousRabbitConnector__thread._RabbitThread__connection_is_set = sync_event_mock
# Connection must be None first:
testrabbit._AsynchronousRabbitConnector__thread._connection = None
# Run code to be tested:
testrabbit.send_message_to_queue('foo')
# Check if the event was unblocked:
sync_event_mock.wait.assert_called()
# Check that publish was called:
feedermock.publish_message.assert_called()
        # Check that the single message was put into the queue:
msg_queue = testrabbit._AsynchronousRabbitConnector__unpublished_messages_queue
self.assert_messages_are_in_queue(msg_queue, ['foo'])
| IS-ENES-Data/esgf-pid | tests/testcases/rabbit/asyn/rabbit_asynchronous_tests.py | Python | apache-2.0 | 18,090 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import SuffixOp as SuffixOp_, Variable as Variable_, is_literal
from jx_base.language import is_op
from jx_elasticsearch.es52.expressions.true_op import MATCH_ALL
from mo_future import first
from pyLibrary.convert import string2regexp
from jx_elasticsearch.es52.painless import SuffixOp as PainlessSuffixOp
class SuffixOp(SuffixOp_):
def to_esfilter(self, schema):
if not self.suffix:
return MATCH_ALL
elif is_op(self.expr, Variable_) and is_literal(self.suffix):
var = first(schema.leaves(self.expr.var)).es_column
return {"regexp": {var: ".*" + string2regexp(self.suffix.value)}}
else:
return PainlessSuffixOp.to_es_script(self, schema).to_esfilter(schema)
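# Illustrative only (the field names here are assumptions): a SuffixOp on
# variable "name" with literal suffix ".txt" takes the Variable/literal
# branch above and yields a filter shaped roughly like
#     {"regexp": {"<es_column for name>": ".*\\.txt"}}
# with the dot escaped by string2regexp.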
| klahnakoski/SpotManager | vendor/jx_elasticsearch/es52/expressions/suffix_op.py | Python | mpl-2.0 | 1,103 |
"""
Test compiling and executing using the gdc tool.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from Common.linkingProblem import testForTool
testForTool('gdc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| timj/scons | test/D/HSTeoh/sconstest-linkingProblem_gdc.py | Python | mit | 1,375 |
import os, sys
import atexit
import optparse
import signal
import logging
import gc
from dpark.rdd import *
from dpark.accumulator import Accumulator
from dpark.schedule import LocalScheduler, MultiProcessScheduler, MesosScheduler
from dpark.env import env
from dpark.moosefs import walk
from dpark.tabular import TabularRDD
import dpark.conf as conf
from math import ceil
logger = logging.getLogger(__name__)
def singleton(cls):
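    """Class decorator caching one instance per (class, args, kwargs) key.

    Illustrative doctest:

    >>> @singleton
    ... class Box(object):
    ...     def __init__(self, label='x'):
    ...         self.label = label
    >>> Box('a') is Box('a')
    True
    >>> Box('a') is Box('b')
    False
    """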
instances = {}
def getinstance(*a, **kw):
key = (cls, tuple(a), tuple(sorted(kw.items())))
if key not in instances:
instances[key] = cls(*a, **kw)
return instances[key]
return getinstance
def setup_conf(options):
if options.conf:
conf.load_conf(options.conf)
elif 'DPARK_CONF' in os.environ:
conf.load_conf(os.environ['DPARK_CONF'])
elif os.path.exists('/etc/dpark.conf'):
conf.load_conf('/etc/dpark.conf')
if options.mem is None:
options.mem = conf.MEM_PER_TASK
conf.__dict__.update(os.environ)
import moosefs
moosefs.MFS_PREFIX = conf.MOOSEFS_MOUNT_POINTS
moosefs.master.ENABLE_DCACHE = conf.MOOSEFS_DIR_CACHE
@singleton
class DparkContext(object):
nextShuffleId = 0
def __init__(self, master=None):
self.master = master
self.initialized = False
self.started = False
self.defaultParallelism = 2
def init(self):
if self.initialized:
return
options = parse_options()
self.options = options
master = self.master or options.master
if master == 'local':
self.scheduler = LocalScheduler()
self.isLocal = True
elif master == 'process':
self.scheduler = MultiProcessScheduler(options.parallel)
self.isLocal = False
else:
if master == 'mesos':
master = conf.MESOS_MASTER
if master.startswith('mesos://'):
if '@' in master:
master = master[master.rfind('@')+1:]
else:
master = master[master.rfind('//')+2:]
elif master.startswith('zoo://'):
master = 'zk' + master[3:]
if ':' not in master:
master += ':5050'
self.scheduler = MesosScheduler(master, options)
self.isLocal = False
self.master = master
if options.parallel:
self.defaultParallelism = options.parallel
else:
self.defaultParallelism = self.scheduler.defaultParallelism()
self.defaultMinSplits = max(self.defaultParallelism, 2)
self.initialized = True
@staticmethod
def setLogLevel(level):
logging.getLogger('dpark').setLevel(level)
def newShuffleId(self):
self.nextShuffleId += 1
return self.nextShuffleId
def parallelize(self, seq, numSlices=None):
self.init()
if numSlices is None:
numSlices = self.defaultParallelism
return ParallelCollection(self, seq, numSlices)
def makeRDD(self, seq, numSlices=None):
return self.parallelize(seq, numSlices)
def textFile(self, path, ext='', followLink=True, maxdepth=0, cls=TextFileRDD, *ka, **kws):
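        """Build an RDD of text lines from `path`.

        `path` may be a single file, a directory (walked recursively,
        honouring `ext`, `followLink` and `maxdepth`), or a list of
        either; '.bz2' and '.gz' files are dispatched to dedicated RDD
        classes. Illustrative call (the path is an assumption):

            dc = DparkContext('local')
            lines = dc.textFile('/data/logs', ext='.log')
        """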
self.init()
if isinstance(path, (list, tuple)):
return self.union([self.textFile(p, ext, followLink, maxdepth, cls, *ka, **kws)
for p in path])
path = os.path.realpath(path)
def create_rdd(cls, path, *ka, **kw):
if cls is TextFileRDD:
if path.endswith('.bz2'):
return BZip2FileRDD(self, path, *ka, **kw)
elif path.endswith('.gz'):
return GZipFileRDD(self, path, *ka, **kw)
return cls(self, path, *ka, **kw)
if os.path.isdir(path):
paths = []
for root,dirs,names in walk(path, followlinks=followLink):
if maxdepth > 0:
depth = len(filter(None, root[len(path):].split('/'))) + 1
if depth > maxdepth:
break
for n in sorted(names):
if n.endswith(ext) and not n.startswith('.'):
p = os.path.join(root, n)
if followLink or not os.path.islink(p):
paths.append(p)
dirs.sort()
for d in dirs[:]:
if d.startswith('.'):
dirs.remove(d)
rdds = [create_rdd(cls, p, *ka, **kws)
for p in paths]
return self.union(rdds)
else:
return create_rdd(cls, path, *ka, **kws)
def partialTextFile(self, path, begin, end, splitSize=None, numSplits=None):
self.init()
return PartialTextFileRDD(self, path, begin, end, splitSize, numSplits)
def bzip2File(self, *args, **kwargs):
"deprecated"
logger.warning("bzip2File() is deprecated, use textFile('xx.bz2') instead")
return self.textFile(cls=BZip2FileRDD, *args, **kwargs)
def csvFile(self, path, dialect='excel', *args, **kwargs):
return self.textFile(path, cls=TextFileRDD, *args, **kwargs).fromCsv(dialect)
def binaryFile(self, path, fmt=None, length=None, *args, **kwargs):
return self.textFile(path, cls=BinaryFileRDD, fmt=fmt, length=length, *args, **kwargs)
def tableFile(self, path, *args, **kwargs):
return self.textFile(path, cls=TableFileRDD, *args, **kwargs)
def tabular(self, path, **kw):
self.init()
return TabularRDD(self, path, **kw)
def table(self, path, **kwargs):
dpath = path[0] if isinstance(path, (list, tuple)) else path
for root, dirs, names in walk(dpath):
if '.field_names' in names:
p = os.path.join(root, '.field_names')
fields = open(p).read().split('\t')
break
else:
raise Exception("no .field_names found in %s" % path)
return self.tableFile(path, **kwargs).asTable(fields)
def beansdb(self, path, depth=None, filter=None, fullscan=False, raw=False, only_latest=False):
"(Key, (Value, Version, Timestamp)) data in beansdb"
self.init()
if isinstance(path, (tuple, list)):
return self.union([self.beansdb(p, depth, filter, fullscan, raw, only_latest)
for p in path])
path = os.path.realpath(path)
        assert os.path.exists(path), "%s does not exist" % path
if os.path.isdir(path):
subs = []
if not depth:
subs = [os.path.join(path, n) for n in os.listdir(path) if n.endswith('.data')]
if subs:
rdd = self.union([BeansdbFileRDD(self, p, filter, fullscan, True)
for p in subs])
else:
subs = [os.path.join(path, '%x'%i) for i in range(16)]
rdd = self.union([self.beansdb(p, depth and depth-1, filter, fullscan, True, only_latest)
for p in subs if os.path.exists(p)])
only_latest = False
else:
rdd = BeansdbFileRDD(self, path, filter, fullscan, True)
# choose only latest version
if only_latest:
rdd = rdd.reduceByKey(lambda v1,v2: v1[2] > v2[2] and v1 or v2, int(ceil(len(rdd) / 4)))
if not raw:
rdd = rdd.mapValue(lambda (v,ver,t): (restore_value(*v), ver, t))
return rdd
def union(self, rdds):
return UnionRDD(self, rdds)
def zip(self, rdds):
return ZippedRDD(self, rdds)
def accumulator(self, init=0, param=None):
return Accumulator(init, param)
def broadcast(self, v):
self.start()
from dpark.broadcast import Broadcast
return Broadcast(v)
def start(self):
if self.started:
return
self.init()
env.start(True)
self.scheduler.start()
self.started = True
atexit.register(self.stop)
def handler(signm, frame):
logger.error("got signal %d, exit now", signm)
self.scheduler.shutdown()
try:
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGABRT, handler)
signal.signal(signal.SIGQUIT, handler)
except: pass
try:
from rfoo.utils import rconsole
rconsole.spawn_server(locals(), 0)
except ImportError:
pass
def runJob(self, rdd, func, partitions=None, allowLocal=False):
self.start()
if partitions is None:
partitions = range(len(rdd))
try:
gc.disable()
for it in self.scheduler.runJob(rdd, func, partitions, allowLocal):
yield it
finally:
gc.collect()
gc.enable()
def clear(self):
if not self.started:
return
self.scheduler.clear()
gc.collect()
def stop(self):
if not self.started:
return
env.stop()
self.scheduler.stop()
self.started = False
def __getstate__(self):
raise ValueError("should not pickle ctx")
parser = optparse.OptionParser(usage="Usage: %prog [options] [args]")
def add_default_options():
parser.disable_interspersed_args()
group = optparse.OptionGroup(parser, "Dpark Options")
group.add_option("-m", "--master", type="string", default="local",
help="master of Mesos: local, process, host[:port], or mesos://")
# group.add_option("-n", "--name", type="string", default="dpark",
# help="job name")
group.add_option("-p", "--parallel", type="int", default=0,
help="number of processes")
group.add_option("-c", "--cpus", type="float", default=1.0,
help="cpus used per task")
group.add_option("-M", "--mem", type="float",
help="memory used per task")
group.add_option("-g", "--group", type="string", default="",
help="which group of machines")
group.add_option("--err", type="float", default=0.0,
help="acceptable ignored error record ratio (0.01%)")
group.add_option("--snapshot_dir", type="string", default="",
help="shared dir to keep snapshot of RDDs")
group.add_option("--conf", type="string",
help="path for configuration file")
group.add_option("--self", action="store_true",
help="user self as exectuor")
group.add_option("--profile", action="store_true",
help="do profiling")
group.add_option("--keep-order", action="store_true",
help="deprecated, always keep order")
group.add_option("-I","--image", type="string",
help="image name for Docker")
group.add_option("-V","--volumes", type="string",
help="volumes to mount into Docker")
parser.add_option_group(group)
parser.add_option("-q", "--quiet", action="store_true")
parser.add_option("-v", "--verbose", action="store_true")
add_default_options()
def parse_options():
options, args = parser.parse_args()
setup_conf(options)
options.logLevel = (options.quiet and logging.ERROR
or options.verbose and logging.DEBUG or logging.INFO)
log_format = '%(asctime)-15s [%(levelname)s] [%(name)-9s] %(message)s'
logging.basicConfig(format=log_format, level=options.logLevel)
logger = logging.getLogger('dpark')
logger.propagate=False
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(log_format))
logger.addHandler(handler)
logger.setLevel(max(options.logLevel, logger.level))
return options
| fe11x/dpark | dpark/context.py | Python | bsd-3-clause | 11,856 |
"""
Routes data between subnets and networks.
"""
from networkdevice import NetworkDevice
from ethernet import Ethernet
from packet import Packet
from exceptions import DiscoveryFailure
from warnings import warn
from threading import Thread
class Router(NetworkDevice):
def __init__(self, **kwargs):
if not 'nports' in kwargs:
kwargs['nports'] = 4
super(Router, self).__init__(**kwargs)
self._thread_running = False
self._kill_thread = False
def ping(self, host):
if not self.discover(host.ip_addr):
warn("Failed to discover host", DiscoveryFailure)
return False
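        # Build a frame addressed from this router to the host; "\x08\x00"
        # in the type field is the IPv4 EtherType (0x0800).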
eth_packet = Ethernet(host.mac_addr, self.mac_addr, "\x08\x00", "PING",
"good")
eth_packet = Packet(eth_packet)
self.send_data(host.ip_addr, eth_packet)
return True
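# Minimal usage sketch (how the host object is constructed depends on the
# surrounding framework and is assumed here):
#
#   router = Router(nports=4)
#   if router.ping(host):
#       pass  # host was discovered and a PING frame was sent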
| unazed/PyT | router.py | Python | gpl-3.0 | 880 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/384')
from data_384 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
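    # linalg.eig solves Mcov v = lambda v: the columns of `vec` are the
    # principal axes and `val` holds the variance captured along each axis.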
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by the corresponding eigenvectors; the x axis is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:3]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
    # Normalize the data set with respect to its variance (not an integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=4)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
#show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Final/results/2-categories/test10_cross_validate_categories_mov_fixed_1200ms.py | Python | mit | 4,331 |
"""
Handle basic SQLite 3 functions here, presenting a unified interface that
other DBs could also follow to be transparently replaced (except for SQL
differences).
"""
import sqlite3
def Connect(database_path):
"""Returns sqlite3 Database Connection object."""
# Connect, parse the column names and the data types. Set isolation_level=None (Auto-Commit)
#TODO(g): Handle transactions. For now they are just in the way as I'm only doing simple things for the demo.
db = sqlite3.connect(database_path, isolation_level=None, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
# Allow accessing the rows with column indexes or column field names (case-insensitive)
db.row_factory = sqlite3.Row
return db
def GetCursor(database_connection):
"""Get database connection cursor"""
cursor = database_connection.cursor()
return cursor
def Close(db):
"""Takes a SQLite3 Database object and closes the connection. Returns None."""
db.close()
def Query(cursor, sql, params=None):
"""Query against the Cursor. Params will be formatted into the sql query string if present.
Returns: list of dicts
"""
# No parameters
if not params:
cursor.execute(sql)
# Else, parameters need to be passed in too
else:
cursor.execute(sql, params)
# Get the SQLite object rows
object_rows = cursor.fetchall()
# Convert the object rows into dicts for my preferred usage (requires setting connection: conn.row_factory = sqlite3.Row)
rows = []
for row in object_rows:
rows.append(dict(row))
return rows
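# Illustrative round trip (the table and values are just for demonstration;
# an in-memory database keeps the example side-effect free):
#
#   db = Connect(':memory:')
#   cursor = GetCursor(db)
#   Query(cursor, 'CREATE TABLE users (id INTEGER, name TEXT)')
#   Query(cursor, 'INSERT INTO users VALUES (?, ?)', (1, 'ada'))
#   rows = Query(cursor, 'SELECT * FROM users WHERE id = ?', (1,))
#   # rows == [{'id': 1, 'name': u'ada'}]
#   Close(db)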
| ghowland/slicuist | scripts/database/sqlite_wrapper.py | Python | mit | 1,572 |
#!/usr/bin/env python
#-*-indent-tabs-mode: nil-*-
import sys
import os.path
import gi
from gi.repository import Gtk, Gio
SCHEMAS = "org.sagarmatha.desklets.launcher"
LAUNCHER_KEY = "launcher-list"
HOME_DIR = os.path.expanduser("~")+"/"
CUSTOM_LAUNCHERS_PATH = HOME_DIR + ".sagarmatha/panel-launchers/"
EDITOR_DIALOG_UI_PATH = "/usr/share/sagarmatha/desklets/[email protected]/editorDialog.ui"
class EditorDialog:
def __init__(self, desklet_id=-1):
self.launcher_settings = Gio.Settings.new(SCHEMAS)
self.launcher_type = "Application"
self.name = ""
self.desklet_id = desklet_id
if not desklet_id == -1:
launcher_list = self.launcher_settings.get_strv(LAUNCHER_KEY)
launcher = ""
for item in launcher_list:
if item.split(":")[0] == str(self.desklet_id):
launcher = item.split(":")[1][:-8]
break;
self.name = launcher
            if self.name.startswith("sagarmatha-custom-launcher"):
self.launcher_type = "Custom Application"
self.tree = Gtk.Builder()
self.tree.add_from_file(EDITOR_DIALOG_UI_PATH)
self.dialog = self.tree.get_object("dialog")
self.launcher_type_combo_box = self.tree.get_object("launcher_type_combo_box")
self.name_entry = self.tree.get_object("name_entry")
self.title_entry = self.tree.get_object("title_entry")
self.command_entry = self.tree.get_object("command_entry")
self.icon_name_entry = self.tree.get_object("icon_name_entry")
self.launcher_icon = self.tree.get_object("launcher_icon")
self.name_entry.set_text(self.name)
self.model = self.launcher_type_combo_box.get_model()
self.citer = [self.model.get_iter_from_string("0"),self.model.get_iter_from_string("1")]
self.launcher_type_combo_box.set_active_iter(self.citer[self.launcher_type_to_index(self.launcher_type)])
self.update_sensitivity()
self.set_fields_by_name()
self.on_icon_changed(self.icon_name_entry.get_text())
self.tree.connect_signals(self)
self.dialog.show_all()
self.dialog.connect("destroy", Gtk.main_quit)
self.dialog.connect("key_release_event", self.on_key_release_event)
Gtk.main()
def launcher_type_to_index(self,launcher_type):
if launcher_type == "Application":
return 0
elif launcher_type == "Custom Application":
return 1
def update_sensitivity(self):
sensitive = True
if (self.launcher_type == "Application"):
sensitive = False
self.name_entry.set_sensitive(not sensitive)
self.title_entry.set_sensitive(sensitive)
self.command_entry.set_sensitive(sensitive)
self.icon_name_entry.set_sensitive(sensitive)
if (self.launcher_type == "Application"):
self.name_entry.grab_focus()
else:
self.title_entry.grab_focus()
def on_launcher_type_combo_box_changed(self, widget):
self.launcher_type = self.launcher_type_combo_box.get_active_text()
self.update_sensitivity()
self.on_name_changed(self.name_entry)
def on_icon_changed(self, widget):
self.launcher_icon.set_from_icon_name(self.icon_name_entry.get_text(), 48)
def on_name_changed(self, widget):
if (self.launcher_type == "Application"):
self.set_fields_by_name()
def set_fields_by_name(self):
application = Application(self.name_entry.get_text() + ".desktop")
if application.title:
self.title_entry.set_text(application.title)
self.command_entry.set_text(application.command)
self.icon_name_entry.set_text(application.icon_name)
def on_key_release_event(self, widget, event):
if event.keyval == 65293: # Enter button
self.on_edit_ok_clicked(widget)
def on_edit_close_clicked(self, widget):
self.dialog.destroy()
def on_edit_ok_clicked(self, widget):
if not self.name_entry.get_text():
return None
if (self.launcher_type == "Application"):
launcher_name = self.name_entry.get_text() + ".desktop"
elif (self.launcher_type == "Custom Application"):
launcher_name = self.write_custom_application()
enabled_desklets = None
if self.desklet_id == -1: # Add new launcher
settings = Gio.Settings.new("org.sagarmatha")
self.desklet_id = settings.get_int("next-desklet-id")
settings.set_int("next-desklet-id", self.desklet_id + 1)
enabled_desklets = settings.get_strv("enabled-desklets")
enabled_desklets.append("[email protected]:%s:0:100" % self.desklet_id)
launcher_list = self.launcher_settings.get_strv(LAUNCHER_KEY)
        # If the launcher is already in the list, remove all of its entries
for item in launcher_list:
if item.split(":")[0] == str(self.desklet_id):
launcher_list.remove(item)
launcher_list.append(str(self.desklet_id) + ":" + launcher_name)
self.launcher_settings.set_strv(LAUNCHER_KEY, launcher_list)
# Update desklets list now if new desklet is made
if enabled_desklets:
settings.set_strv("enabled-desklets", enabled_desklets)
self.dialog.destroy()
def get_custom_id(self):
i = 1
directory = Gio.file_new_for_path(CUSTOM_LAUNCHERS_PATH)
if not directory.query_exists(None):
directory.make_directory_with_parents(None)
fileRec = Gio.file_parse_name(CUSTOM_LAUNCHERS_PATH + 'sagarmatha-custom-launcher-' + str(i) + '.desktop')
while fileRec.query_exists(None):
i = i + 1
fileRec = Gio.file_parse_name(CUSTOM_LAUNCHERS_PATH + 'sagarmatha-custom-launcher-' + str(i) + '.desktop')
return i;
def write_custom_application(self):
i = self.get_custom_id();
file_name = "sagarmatha-custom-launcher-" + str(i) + ".desktop"
file_path = CUSTOM_LAUNCHERS_PATH + file_name
title = self.title_entry.get_text()
command = self.command_entry.get_text()
icon_name = self.icon_name_entry.get_text()
_file = open(file_path,"w+")
write_list=["[Desktop Entry]\n","Type=Application\n", "Name=" + title + "\n","Exec=" + command + "\n","Icon=" + icon_name + "\n"]
_file.writelines(write_list)
_file.close()
return file_name
class Application:
def __init__(self, file_name):
self.file_name = file_name
self._path = None
self.icon_name = None
self.title = None
self.command = None
if (os.path.exists(CUSTOM_LAUNCHERS_PATH + file_name)):
self._path = CUSTOM_LAUNCHERS_PATH + file_name
elif (os.path.exists("/usr/share/applications/" + file_name)):
self._path = "/usr/share/applications/" + file_name
if self._path:
self._file = open(self._path, "r")
while self._file:
line = self._file.readline()
if len(line)==0:
break
if (line.find("Name") == 0 and (not "[" in line)):
self.title = line.replace("Name","").replace("=","").replace("\n","")
if (line.find("Icon") == 0):
self.icon_name = line.replace("Icon","").replace(" ","").replace("=","").replace("\n","")
if (line.find("Exec") == 0):
self.command = line.replace("Exec","").replace("=","").replace("\n","")
if self.icon_name and self.title and self.command:
break
if not self.icon_name:
self.icon_name = "application-x-executable"
if not self.title:
self.title = "Application"
if not self.command:
self.command = ""
self._file.close()
if __name__ == "__main__":
if len(sys.argv) > 1:
dialog = EditorDialog(sys.argv[1])
else:
dialog = EditorDialog()
| chitwanix/Sagarmatha | files/usr/share/sagarmatha/desklets/[email protected]/editorDialog.py | Python | gpl-2.0 | 8,184 |
#!/usr/bin/env python
#
# Copyright (C) 2015--2016, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import numpy
import math
import random
from astropy.io import fits
from ximpol import XIMPOL_IRF
from ximpol import XIMPOL_DATA
from ximpol.irf.arf import xEffectiveArea
from ximpol.irf.mrf import xModulationFactor
from ximpol.utils.logging_ import logger
from ximpol.utils.matplotlib_ import pyplot as plt
from ximpol.utils.matplotlib_ import overlay_tag, save_current_figure
from ximpol.core.spline import xInterpolatedUnivariateSplineLinear
from ximpol.config.grb_swift_download import download_swift_grb_lc_file
from ximpol.config.grb_swift_download import get_all_swift_grb_names
from ximpol.config.grb_utils import parse_light_curve
from ximpol.config.grb_utils import get_grb_spec_index, get_grb_position
from ximpol.core.fitsio import xBinTableHDUBase, xPrimaryHDU
MIN_ENERGY = 2.
MAX_ENERGY = 10.
ENERGY_BINNING = numpy.array([MIN_ENERGY, MAX_ENERGY])
OUTFILE = os.path.join(XIMPOL_DATA,'GRBmainInfos.fits')
from ximpol.irf import load_arf, load_mrf
from ximpol.irf import DEFAULT_IRF_NAME
from ximpol.srcmodel.spectrum import int_eflux2pl_norm, xCountSpectrum
aeff = load_arf(DEFAULT_IRF_NAME)
modf = load_mrf(DEFAULT_IRF_NAME)
process_grb_mdp = False
mdp_vs_time = False
class xBinTableGRBmain(xBinTableHDUBase):
NAME = 'GRB_MAIN'
HEADER_KEYWORDS = []
DATA_SPECS = [
('NAME' , 'A20', None , 'grb name'),
('ENERGY_LO' , 'E', 'keV' , 'energy low'),
('ENERGY_HI' , 'E', 'keV' , 'energy high'),
('RA' , 'E', 'degrees' , 'grb right ascension'),
('DEC' , 'E', 'degrees' , 'grb declination'),
('INDEX' , 'E', None , 'late spectral index'),
('START' , 'D', 's' , 'observation start time'),
('STOP' , 'D', 's' , 'observation stop time'),
('PROMPT_FLUX' , 'E', 'erg/cm2/s' , 'grb prompt flux'),
        ('PROMPT_START', 'D', 's'           , 'grb prompt start time'),
        ('PROMPT_STOP' , 'D', 's'           , 'grb prompt stop time'),
('GRB_START' , 'D', 's' , 'grb start time'),
('GRB_STOP' , 'D', 's' , 'grb stop time'),
('EFFECTIVE_MU', 'E', None , 'effective modulation factor'),
('COUNTS' , 'J', None , 'total counts'),
('MDP 99%' , 'E', None , 'mdp')
]
def build_grb_fits_file(data,outfile):
primary_hdu = xPrimaryHDU()
grb_info_hdu = xBinTableGRBmain(data)
hdu_list = fits.HDUList([primary_hdu, grb_info_hdu])
hdu_list.info()
logger.info('Writing GRB main infos table to %s...' % outfile)
hdu_list.writeto(outfile, clobber=True)
logger.info('Done.')
def process_grb(grb_name, tstart=21600., duration=30000., prompt_duration=600):
"""
"""
file_path = download_swift_grb_lc_file(grb_name)
if file_path is None:
return None
pl_index = get_grb_spec_index(file_path)
ra, dec = get_grb_position(file_path)
light_curve = parse_light_curve(file_path, num_min_data=5.)
if light_curve is None:
return None
t = light_curve.x
grb_start, prompt_tstart = t[0], t[0]
grb_stop = t[-1]
prompt_tstop = t[0] + prompt_duration
prompt_flux = light_curve.integral(prompt_tstart, prompt_tstop)
logger.info('Integral energy flux in %.3f--%.3f s: %.3e erg cm^{-2}' %\
(prompt_tstart, prompt_tstop, prompt_flux))
tstart = max(tstart, t[0])
tstop = min(tstart + duration, t[-1])
logger.info('Effective time interval for the MDP: %.3f--%.3f s' %\
(tstart, tstop))
t = t[(t >= tstart)*(t <= tstop)]
if len(t) < 2:
return None
scale_factor = int_eflux2pl_norm(1., 0.3, 10., pl_index, erg=True)
pl_norm = light_curve.scale(scale_factor)# Fix the label.
def energy_spectrum(E, t):
return pl_norm(t)*numpy.power(E, -pl_index)
count_spectrum = xCountSpectrum(energy_spectrum, aeff, t)
mdp_table = count_spectrum.build_mdp_table(ENERGY_BINNING, modf)
logger.info(mdp_table)
mdp = mdp_table.mdp_values()[0]
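    # Note: in the background-free limit the usual figure of merit is
    # MDP_99 ~ 4.29 / (mu_eff * sqrt(N)); eff_mu and counts are therefore
    # extracted below alongside the mdp value itself.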
eff_mu = [row.mu_effective for row in mdp_table.rows]
counts = [row.num_signal for row in mdp_table.rows]
grb_values = [ra, dec, pl_index, tstart, tstop, prompt_flux, prompt_tstart,\
prompt_tstop, grb_start, grb_stop, eff_mu[0], counts[0], mdp]
return grb_values
def process_grb_list(tstart=21600., duration=30000., prompt_duration=600):
"""
"""
name = numpy.array([], dtype=str)
e_low = numpy.array([])
e_high = numpy.array([])
ra = numpy.array([])
dec = numpy.array([])
index = numpy.array([])
start = numpy.array([])
stop = numpy.array([])
prompt_start = numpy.array([])
prompt_stop = numpy.array([])
prompt_flux = numpy.array([])
grb_start = numpy.array([])
grb_stop = numpy.array([])
eff_mu = numpy.array([])
counts = numpy.array([], dtype=numpy.int64)
mdp = numpy.array([])
for grb_name in get_all_swift_grb_names():
logger.info('Processing %s...' % grb_name)
grb_values = process_grb(grb_name,tstart=tstart,duration=duration,\
prompt_duration=prompt_duration)
if grb_values is None:
continue
name = numpy.append(name,[grb_name])
e_low = numpy.append(e_low,[MIN_ENERGY])
e_high = numpy.append(e_high,[MAX_ENERGY])
ra = numpy.append(ra,[grb_values[0]])
dec = numpy.append(dec,[grb_values[1]])
index = numpy.append(index,[grb_values[2]])
start = numpy.append(start,[grb_values[3]])
stop = numpy.append(stop,[grb_values[4]])
prompt_flux = numpy.append(prompt_flux,[grb_values[5]])
prompt_start = numpy.append(prompt_start,[grb_values[6]])
prompt_stop = numpy.append(prompt_stop,[grb_values[7]])
grb_start = numpy.append(grb_start,[grb_values[8]])
grb_stop = numpy.append(grb_stop,[grb_values[9]])
eff_mu = numpy.append(eff_mu,[grb_values[10]])
counts = numpy.append(counts,[grb_values[11]])
mdp = numpy.append(mdp,[grb_values[12]])
grb_values_array = [name,e_low,e_high,ra,dec,index,start,stop,prompt_flux,\
prompt_start,prompt_stop,grb_start,grb_stop,eff_mu,\
counts,mdp]
return grb_values_array
def get_spectrum(_energy, norm, index):
"""Power law assumed for the energy.
Returns the array with the spectrum values in [KeV-1 cm-2 s-1]
given an energy array.
"""
return aeff(_energy)*norm*numpy.power(_energy, -index)
def plot_grb_mdp_vs_repoint(grb_name, _t_repoint, t_obs=50000, \
color='black', show=True):
"""Plot all the MDP (changing the repointing elapsed time defined in *arg)
for a given GRB.
"""
mdp_list = []
for repoint in _t_repoint:
        grb_values = process_grb(grb_name,tstart=repoint,duration=t_obs)
        if grb_values is not None:
            mdp_list.append(grb_values[-1])
else:
mdp_list.append(0.)
_mdp = numpy.array(mdp_list)*100
plt.plot(_t_repoint, _mdp, marker='.',linestyle='-', lw=0.5, color=color,\
label=grb_name)
plt.xlabel('$t_{repoint}$ [s]')
plt.ylabel('2.-10. keV MDP (%)')
plt.title('MDP vs $t_{repoint}$, $\Delta t_{obs} =$ %i s'\
%(t_obs))
if show:
plt.show()
return _mdp, _t_repoint
def plot_grb_mdp_vs_obstime(grb_name, _t_obs, t_repoint=21600, \
color='black', show=True):
"""Plot all the MDP (changing the repointing elapsed time defined in *arg)
for a given GRB.
"""
mdp_list = []
for obs in _t_obs:
mdp = process_grb(grb_name,tstart=t_repoint,duration=obs)
if mdp is not None:
mdp = mdp[-1]
mdp_list.append(mdp)
else:
mdp_list.append(0.)
_mdp = numpy.array(mdp_list)*100
plt.plot(_t_obs,_mdp, marker='.',linestyle='-', lw=0.5, color=color,\
label=grb_name)
plt.xlabel('$\Delta t_{obs}$ [s]')
plt.ylabel('2.-10. keV MDP (%)')
plt.title('MDP vs $\Delta t_{obs}$, $ t_{repoint} =$ %i s'\
%(t_repoint))
if show:
plt.show()
return _mdp, _t_obs
def main():
"""Produce some plots
"""
# If process_grb_mdp = True, produces a fits file with all the
# main infos on each grb
if process_grb_mdp == True:
data = process_grb_list(duration=50000.)
build_grb_fits_file(data,OUTFILE)
# 1) the plot of the MDP for all the Swift GRBs
# and a given repointing time
# 2) the cumulative of the previous histogram
# 3) the plot of the correlation between MDP for all the Swift
# GRBs and a given repointing time and the integral prompt
# (first 10 min) flux
# 1)------------------------------------------------------
plt.figure(figsize=(10, 6), dpi=80)
bins = numpy.linspace(0, 100, 100)
hdulist = fits.open(OUTFILE)
grbdata = hdulist[1].data
_mdp = grbdata['MDP 99%']
t_obs = '50000'
t_rep = '21600'
plt.title('%i GRBs, $\Delta t_{obs}=%s s,$ $t_{repoint}=%s s$'\
%(len(_mdp),t_obs,t_rep))
plt.hist(_mdp*100, bins, alpha=0.5)
plt.xlabel('2.-10. keV MDP (%)')
plt.ylabel('Number of GRBs')
overlay_tag()
save_current_figure('all_grbs_MDP_histo', clear=False)
# 2)----------------------------------------------------
plt.figure(figsize=(10, 6), dpi=80)
plt.title('%i GRBs, $\Delta t_{obs}=%s s,$ $t_{repoint}=%s s$'\
%(len(_mdp),t_obs,t_rep))
(n, bins, patches) = plt.hist(_mdp*100, bins, histtype='step', \
cumulative=True)
plt.xlabel('2.-10. keV MDP (%)')
plt.ylabel('Cumulative number of GRBs')
for i in range(0,30):
print 'MDP %.2f%%: %i GRBs'%(i,n[i])
overlay_tag()
save_current_figure('all_grbs_MDP_cumulative', clear=False)
# 3)------------------------------------------------------
plt.figure(figsize=(10, 6), dpi=80)
ax = plt.gca()
_prompt_tstart = grbdata['PROMPT_START']
_flux = grbdata['PROMPT_FLUX']
_good_indexes = numpy.where(_prompt_tstart>350)
_flux = numpy.delete(_flux,_good_indexes)
_mdp = numpy.delete(_mdp,_good_indexes)
plt.scatter(_mdp*100, _flux, s=30, marker='.', color='blue')
plt.xlabel('2.-10. keV MDP (%)')
plt.ylabel('[erg $\cdot$ cm$^{-2}$]')
plt.title('%i GRBs, $\Delta t_{obs}=%s s,$ $t_{repoint}=%s s$'%(len(_flux),\
t_obs,t_rep))
plt.xlim(1, 100)
plt.ylim(1e-9,1e-4)
plt.plot([20, 20], [1e-9,1e-4], 'k--', lw=1, color='green')
ax.set_yscale('log')
ax.set_xscale('log')
overlay_tag()
save_current_figure('grb_MDP_prompt',clear=False)
plt.show()
# If mdp_vs_time = True Produces:
# 1) the plot of the MDP for a given GRB
# as a function of the repointing time
# 2) the plot of the MDP for a given GRB
# as a function of the observation duration
color_list = ['red','salmon','goldenrod','darkgreen','limegreen',\
                  'royalblue','mediumpurple','darkviolet','deeppink']
                  # 'yellow', 'darkcyan'
if mdp_vs_time == True:
grb_list = ['GRB 060729', 'GRB 080411', 'GRB 091127', 'GRB 111209A',\
'GRB 120711A', 'GRB 130427A', 'GRB 130505A', 'GRB 130907A',\
'GRB 150403A']
#1)------------------------------------------------------
plt.figure(figsize=(10, 6), dpi=80)
ax = plt.gca()
for i,grb in enumerate(grb_list):
repointing_time = numpy.logspace(2,4.8,20)
plot_grb_mdp_vs_repoint(grb,repointing_time,show=False,\
color=color_list[i])
ax.legend(loc='upper left', shadow=False, fontsize='small')
#plt.ylim(0,100)
plt.plot([21600, 21600], [0, 100], 'k--', lw=1, color='green')
plt.plot([43200, 43200], [0, 100], 'k--', lw=1,color='green')
ax.set_yscale('log')
ax.set_xscale('log')
overlay_tag()
save_current_figure('grb_MDP_vs_repoint',clear=False)
#2)------------------------------------------------------
plt.figure(figsize=(10, 6), dpi=80)
ax = plt.gca()
for i,grb in enumerate(grb_list):
obs_time = numpy.logspace(3,5,30)
plot_grb_mdp_vs_obstime(grb,obs_time,show=False,color=color_list[i])
ax.legend(loc='upper right', shadow=False, fontsize='small')
ax.set_yscale('log')
ax.set_xscale('log')
overlay_tag(x=0.5)
save_current_figure('grb_MDP_vs_obstime',clear=False)
plt.show()
if __name__=='__main__':
main()
| lucabaldini/ximpol | ximpol/examples/grb_swift_mdp.py | Python | gpl-3.0 | 13,622 |
# encoding: utf8
"""Collection of small functions and scraps of data that don't belong in the
pokedex core -- either because they're inherently Web-related, or because
they're very flavorful and don't belong or fit well in a database.
"""
from __future__ import absolute_import, division
import math
import re
from itertools import groupby, chain, repeat
from operator import attrgetter
from pylons import url
import pokedex.db.tables as tables
import pokedex.formulae as formulae
from pokedex.roomaji import romanize
import spline.lib.helpers as h
# We can't translate at import time, but _ will mark strings as translatable
# Functions that need translation will take a "_" parameter, which defaults
# to this:
_ = unicode
def make_thingy_url(thingy, subpage=None):
u"""Given a thingy (Pokémon, move, type, whatever), returns a URL to it.
"""
# Using the table name as an action directly looks kinda gross, but I can't
# think of anywhere I've ever broken this convention, and making a
# dictionary to get data I already have is just silly
args = {}
# Pokémon with forms need the form attached to the URL
if getattr(thingy, 'forme_base_pokemon_id', None):
args['form'] = thingy.forme_name
# Items are split up by pocket
if isinstance(thingy, tables.Item):
args['pocket'] = thingy.pocket.identifier
action = thingy.__tablename__
if subpage:
action += '_' + subpage
return url(controller='dex',
action=action,
name=thingy.name.lower(),
**args)
def render_flavor_text(flavor_text, literal=False):
"""Makes flavor text suitable for HTML presentation.
If `literal` is false, collapses broken lines into single lines.
If `literal` is true, linebreaks are preserved exactly as they are in the
games.
"""
# n.b.: \u00ad is soft hyphen
# Somehow, the games occasionally have \n\f, which makes no sense at all
# and wouldn't render in-game anyway. Fix this
flavor_text = flavor_text.replace('\n\f', '\f')
if literal:
# Page breaks become two linebreaks.
# Soft hyphens become real hyphens.
# Newlines become linebreaks.
html = flavor_text.replace(u'\f', u'<br><br>') \
.replace(u'\u00ad', u'-') \
.replace(u'\n', u'<br>')
else:
# Page breaks are treated just like newlines.
# Soft hyphens followed by newlines vanish.
# Letter-hyphen-newline becomes letter-hyphen, to preserve real
# hyphenation.
# Any other newline becomes a space.
html = flavor_text.replace(u'\f', u'\n') \
.replace(u'\u00ad\n', u'') \
.replace(u'\u00ad', u'') \
.replace(u' -\n', u' - ') \
.replace(u'-\n', u'-') \
.replace(u'\n', u' ')
return h.literal(html)
## Collapsing
def collapse_flavor_text_key(literal=True):
"""A wrapper around `render_flavor_text`. Returns a function to be used
as a key for `collapse_versions`, or any other function which takes a key.
"""
def key(text):
return render_flavor_text(text.flavor_text, literal=literal)
return key
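# Illustrative pairing: the returned key is intended for collapse_versions
# below, e.g.
#   collapse_versions(flavor_rows, key=collapse_flavor_text_key(literal=True))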
def group_by_generation(things):
"""A wrapper around itertools.groupby which groups by generation."""
things = iter(things)
try:
a_thing = things.next()
except StopIteration:
return ()
key = get_generation_key(a_thing)
return groupby(chain([a_thing], things), key)
def get_generation_key(sample_object):
"""Given an object, return a function which retrieves the generation.
Tries x.generation, x.version_group.generation, and x.version.generation.
"""
if hasattr(sample_object, 'generation'):
return attrgetter('generation')
elif hasattr(sample_object, 'version_group'):
return (lambda x: x.version_group.generation)
elif hasattr(sample_object, 'version'):
return (lambda x: x.version.generation)
raise AttributeError
def collapse_versions(things, key):
"""Collapse adjacent equal objects and remember their versions.
Yields tuples of ([versions], key(x)). Uses itertools.groupby internally.
"""
things = iter(things)
# let the StopIteration bubble up
a_thing = things.next()
if hasattr(a_thing, 'version'):
def get_versions(things):
return [x.version for x in things]
elif hasattr(a_thing, 'version_group'):
def get_versions(things):
return sum((x.version_group.versions for x in things), [])
for collapsed_key, group in groupby(chain([a_thing], things), key):
yield get_versions(group), collapsed_key
### Images and links
def filename_from_name(name):
"""Shorten the name of a whatever to something suitable as a filename.
e.g. Water's Edge -> waters-edge
"""
name = unicode(name)
name = name.lower()
# TMs and HMs share sprites
if re.match(u'^[th]m\d{2}$', name):
if name[0:2] == u'tm':
return u'tm-normal'
else:
return u'hm-normal'
# As do data cards
if re.match(u'^data card \d+$', name):
return u'data-card'
name = re.sub(u'[ _]+', u'-', name)
name = re.sub(u'[\'.]', u'', name)
return name
def pokedex_img(src, **attr):
return h.HTML.img(src=url(controller='dex', action='media', path=src), **attr)
# XXX Should these be able to promote to db objects, rather than demoting to
# strings and integers? If so, how to do that without requiring db access
# from here?
def generation_icon(generation, _=_):
"""Returns a generation icon, given a generation number."""
# Convert generation to int if necessary
if not isinstance(generation, int):
generation = generation.id
return pokedex_img('versions/generation-%d.png' % generation,
alt=_(u"Generation %d") % generation,
title=_(u"Generation %d") % generation)
def version_icons(*versions, **kwargs):
"""Returns some version icons, given a list of version names.
Keyword arguments:
`dex_translate`: translation function for version names
"""
# python's argument_list syntax is kind of limited here
dex_translate = kwargs.get('dex_translate', _)
version_icons = u''
comma = chain([u''], repeat(u', '))
for version in versions:
# Convert version to string if necessary
if not isinstance(version, basestring):
version = version.name
version_filename = filename_from_name(version)
version_icons += pokedex_img(u'versions/%s.png' % version_filename,
alt=comma.next() + dex_translate(version),
title=dex_translate(version))
return version_icons
def pokemon_sprite(pokemon, prefix='heartgold-soulsilver', **attr):
"""Returns an <img> tag for a Pokémon sprite."""
# Kinda gross, but it's entirely valid to pass None as a form
form = attr.pop('form', pokemon.forme_name)
if 'animated' in prefix:
ext = 'gif'
else:
ext = 'png'
if form:
# Use the overridden form name
alt_text = "{0} {1}".format(form.title(), pokemon.name)
else:
# Use the Pokémon's default full-name
alt_text = pokemon.full_name
attr.setdefault('alt', alt_text)
attr.setdefault('title', alt_text)
if form:
filename = '%d-%s.%s' % (pokemon.national_id,
filename_from_name(form), ext)
else:
filename = '%d.%s' % (pokemon.national_id, ext)
return pokedex_img("%s/%s" % (prefix, filename), **attr)
def pokemon_link(pokemon, content=None, to_flavor=False, **attr):
"""Returns a link to a Pokémon page.
`pokemon`
A name or a Pokémon object.
`content`
Link text (or image, or whatever).
`form`
An alternate form to link to. If the form is only a sprite, the link
will be to the flavor page.
`to_flavor`
If True, the link will always be to the flavor page, regardless of
form.
"""
# Kinda gross, but it's entirely valid to pass None as a form
form = attr.pop('form', pokemon.forme_name)
if form == pokemon.forme_name and not pokemon.forme_base_pokemon_id:
# Don't use default form's name as part of the link
form = None
# Content defaults to the name of the Pokémon
if not content:
if form:
content = "%s %s" % (form.title(), pokemon.name)
else:
content = pokemon.name
url_kwargs = {}
if form:
# Don't want a ?form=None, so just only pass a form at all if there's
# one to pass
url_kwargs['form'] = form
action = 'pokemon'
if form and pokemon.normal_form.form_group \
and not pokemon.normal_form.formes:
# If a Pokémon does not have real (different species) forms, e.g.
# Unown and its letters, then a form link only makes sense if it's to a
# flavor page.
action = 'pokemon_flavor'
elif to_flavor:
action = 'pokemon_flavor'
return h.HTML.a(
content,
href=url(controller='dex', action=action,
name=pokemon.name.lower(), **url_kwargs),
**attr
)
def damage_class_icon(damage_class, dex_translate=_, _=_):
return pokedex_img(
"chrome/damage-classes/%s.png" % damage_class.name.lower(),
alt=damage_class.name,
title=_("%s: %s") % (
dex_translate(damage_class.name),
dex_translate(damage_class.description),
)
)
def type_icon(type):
if not isinstance(type, basestring):
type = type.name
return pokedex_img('chrome/types/%s.png' % type, alt=type, title=type)
def type_link(type):
return h.HTML.a(
type_icon(type),
href=url(controller='dex', action='types', name=type.name.lower()),
)
def item_link(item, include_icon=True, dex_translate=_):
"""Returns a link to the requested item."""
item_name = dex_translate(item.name)
if include_icon:
if item.pocket.identifier == u'machines':
machines = item.machines
prefix = u'hm' if machines[-1].is_hm else u'tm'
filename = prefix + u'-' + machines[-1].move.type.name.lower()
else:
filename = filename_from_name(item_name)
label = pokedex_img("items/%s.png" % filename,
alt=item_name, title=item_name) + ' ' + item_name
else:
label = item_name
return h.HTML.a(label,
href=url(controller='dex', action='items',
pocket=item.pocket.identifier, name=item_name.lower()),
)
### Labels
# Type efficacy, from percents to Unicode fractions
type_efficacy_label = {
0: '0',
25: u'¼',
50: u'½',
100: '1',
200: '2',
400: '4',
}
# Gender rates, translated from -1..8 to useful text
gender_rate_label = {
-1: _(u'genderless'),
0: _(u'always male'),
1: _(u'⅞ male, ⅛ female'),
2: _(u'¾ male, ¼ female'),
3: _(u'⅝ male, ⅜ female'),
4: _(u'½ male, ½ female'),
5: _(u'⅜ male, ⅝ female'),
6: _(u'¼ male, ¾ female'),
7: _(u'⅛ male, ⅞ female'),
8: _(u'always female'),
}
def article(noun, _=_):
"""Returns 'a' or 'an', as appropriate."""
if noun[0].lower() in u'aeiou':
return _(u'an')
return _(u'a')
def evolution_description(evolution, _=_, dex_translate=_):
"""Crafts a human-readable description from a `pokemon_evolution` row
object.
"""
chunks = []
# Trigger
if evolution.trigger.identifier == u'level_up':
chunks.append(_(u'Level up'))
elif evolution.trigger.identifier == u'trade':
chunks.append(_(u'Trade'))
elif evolution.trigger.identifier == u'use_item':
item_name = dex_translate(evolution.trigger_item.name)
chunks.append(_(u"Use {article} {item}").format(
article=article(item_name, _=_),
item=dex_translate(item_name)))
elif evolution.trigger.identifier == u'shed':
chunks.append(
_(u"Evolve {from_pokemon} ({to_pokemon} will consume "
u"a Poké Ball and appear in a free party slot)").format(
from_pokemon=dex_translate(evolution.from_pokemon.full_name),
to_pokemon=dex_translate(evolution.to_pokemon.full_name)))
else:
chunks.append(_(u'Do something'))
# Conditions
if evolution.gender:
chunks.append(_(u"{0}s only").format(evolution.gender))
if evolution.time_of_day:
chunks.append(_(u"during the {0}").format(evolution.time_of_day))
if evolution.minimum_level:
chunks.append(_(u"starting at level {0}").format(evolution.minimum_level))
if evolution.location_id:
chunks.append(_(u"around {0}").format(evolution.location.name))
if evolution.held_item_id:
chunks.append(_(u"while holding {article} {item}").format(
article=article(evolution.held_item.name),
item=evolution.held_item.name))
if evolution.known_move_id:
chunks.append(_(u"knowing {0}").format(evolution.known_move.name))
if evolution.minimum_happiness:
chunks.append(_(u"with at least {0} happiness").format(
evolution.minimum_happiness))
if evolution.minimum_beauty:
chunks.append(_(u"with at least {0} beauty").format(
evolution.minimum_beauty))
if evolution.relative_physical_stats is not None:
if evolution.relative_physical_stats < 0:
op = _(u'<')
elif evolution.relative_physical_stats > 0:
op = _(u'>')
else:
op = _(u'=')
chunks.append(_(u"when Attack {0} Defense").format(op))
return u', '.join(chunks)
### Formatting
# Attempts at reasonable defaults for trainer size, based on the average
# American
trainer_height = 17.8 # dm
trainer_weight = 780 # hg
def format_height_metric(height):
"""Formats a height in decimeters as M m."""
return "%.1f m" % (height / 10)
def format_height_imperial(height):
"""Formats a height in decimeters as F'I"."""
return "%d'%.1f\"" % (
height * 0.32808399,
(height * 0.32808399 % 1) * 12,
)
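# Worked example: trainer_height = 17.8 dm gives 17.8 * 0.32808399 = 5.84 ft,
# which format_height_imperial renders as 5'10.1".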
def format_weight_metric(weight):
"""Formats a weight in hectograms as K kg."""
return "%.1f kg" % (weight / 10)
def format_weight_imperial(weight):
"""Formats a weight in hectograms as L lb."""
return "%.1f lb" % (weight / 10 * 2.20462262)
### General data munging
def scale_sizes(size_dict, dimensions=1):
"""Normalizes a list of sizes so the largest is 1.0.
Use `dimensions` if the sizes are non-linear, i.e. 2 for scaling area.
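
    Illustrative doctest:

    >>> scale_sizes({'a': 1, 'b': 4}) == {'a': 0.25, 'b': 1.0}
    True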
"""
# x -> (x/max)^(1/dimensions)
max_size = float(max(size_dict.values()))
scaled_sizes = dict()
for k, v in size_dict.items():
scaled_sizes[k] = math.pow(v / max_size, 1.0 / dimensions)
return scaled_sizes
def apply_pokemon_template(template, pokemon, dex_translate=_, _=_):
u"""`template` should be a string.Template object.
Uses safe_substitute to inject some fields from the Pokémon into the
template.
This cheerfully returns a literal, so be sure to escape the original format
string BEFORE passing it to Template!
"""
d = dict(
icon=pokemon_sprite(pokemon, prefix=u'icons'),
id=pokemon.national_id,
name=pokemon.full_name,
height=format_height_imperial(pokemon.height),
height_ft=format_height_imperial(pokemon.height),
height_m=format_height_metric(pokemon.height),
weight=format_weight_imperial(pokemon.weight),
weight_lb=format_weight_imperial(pokemon.weight),
weight_kg=format_weight_metric(pokemon.weight),
gender=_(gender_rate_label[pokemon.gender_rate]),
species=dex_translate(pokemon.species),
base_experience=pokemon.base_experience,
capture_rate=pokemon.capture_rate,
base_happiness=pokemon.base_happiness,
)
# "Lazy" loading, to avoid hitting other tables if unnecessary. This is
# very chumpy and doesn't distinguish between literal text and fields (e.g.
# '$type' vs 'type'), but that's very unlikely to happen, and it's not a
# big deal if it does
if 'type' in template.template:
types = pokemon.types
d['type'] = u'/'.join(dex_translate(type_.name) for type_ in types)
d['type1'] = dex_translate(types[0].name)
d['type2'] = dex_translate(types[1].name) if len(types) > 1 else u''
if 'egg_group' in template.template:
egg_groups = pokemon.egg_groups
d['egg_group'] = u'/'.join(dex_translate(group.name) for group in egg_groups)
d['egg_group1'] = dex_translate(egg_groups[0].name)
d['egg_group2'] = dex_translate(egg_groups[1].name) if len(egg_groups) > 1 else u''
if 'ability' in template.template:
abilities = pokemon.abilities
d['ability'] = u'/'.join(dex_translate(ability.name) for ability in abilities)
d['ability1'] = dex_translate(abilities[0].name)
d['ability2'] = dex_translate(abilities[1].name) if len(abilities) > 1 else u''
if 'color' in template.template:
d['color'] = dex_translate(pokemon.color)
if 'habitat' in template.template:
d['habitat'] = dex_translate(pokemon.habitat)
if 'shape' in template.template:
if pokemon.shape:
d['shape'] = dex_translate(pokemon.shape.name)
else:
d['shape'] = ''
if 'hatch_counter' in template.template:
d['hatch_counter'] = pokemon.hatch_counter
if 'steps_to_hatch' in template.template:
d['steps_to_hatch'] = (pokemon.hatch_counter + 1) * 255
if 'stat' in template.template or \
'hp' in template.template or \
'attack' in template.template or \
'defense' in template.template or \
'speed' in template.template or \
'effort' in template.template:
d['effort'] = u', '.join("{0} {1}".format(_.effort, _.stat.name)
for _ in pokemon.stats if _.effort)
d['stats'] = u'/'.join(str(_.base_stat) for _ in pokemon.stats)
for pokemon_stat in pokemon.stats:
key = pokemon_stat.stat.name.lower().replace(' ', '_')
d[key] = pokemon_stat.base_stat
return h.literal(template.safe_substitute(d))
def apply_move_template(template, move):
u"""`template` should be a string.Template object.
Uses safe_substitute to inject some fields from the move into the template,
just like the above.
"""
d = dict(
id=move.id,
name=move.name,
type=move.type.name,
damage_class=move.damage_class.name,
pp=move.pp,
power=move.power,
accuracy=move.accuracy,
priority=move.move_effect.priority,
effect_chance=move.effect_chance,
effect=move.move_effect.short_effect,
)
return h.literal(template.safe_substitute(d))
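# A hedged illustration (template string and attribute values are made up) of
# the string.Template substitution both helpers perform:
#
#     import string
#     t = string.Template(u'$name ($type, $pp PP)')
#     apply_move_template(t, move)
#     # -> literal u'Tackle (normal, 35 PP)' for a move with those attributes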
| Sanqui/spline-pokedex | splinext/pokedex/helpers.py | Python | mit | 19,255 |
'''Viewer widgets
=================
Defines widgets used with the :mod:`~ceed.view` module. These widgets are used
to control and display the experiment on screen, both when playing the
experiment for preview and when playing the experiment full-screen in a second
process.
'''
from time import perf_counter
from typing import List
from kivy.uix.behaviors.focus import FocusBehavior
from kivy.uix.scatter import Scatter
from kivy.properties import NumericProperty, BooleanProperty
from kivy.app import App
from kivy.graphics.vertex_instructions import Point
from kivy.graphics.transformation import Matrix
from kivy.graphics.context_instructions import Color
from kivy.factory import Factory
__all__ = ('ViewRootFocusBehavior', 'MEAArrayAlign')
_get_app = App.get_running_app
class ViewRootFocusBehavior(FocusBehavior):
"""The root widget used for the second process when the experiment is
played. It adds focus behavior to the viewer.
Whenever a key is pressed in the second process it is passed on to the
    controller in the main process, which handles it as needed (possibly sending
a message back to the second process).
"""
def keyboard_on_key_down(self, window, keycode, text, modifiers):
_get_app().view_controller.send_keyboard_down(
keycode[1], modifiers, perf_counter())
return True
def keyboard_on_key_up(self, window, keycode):
_get_app().view_controller.send_keyboard_up(keycode[1], perf_counter())
return True
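# A hedged sketch (class names hypothetical): the second process would mix
# this behavior into its actual root widget so key events reach the main
# process, e.g.:
#
#     class ViewRoot(ViewRootFocusBehavior, FloatLayout):
#         pass
#
#     root = ViewRoot(focus=True)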
class MEAArrayAlign(Scatter):
"""The widget used during the experiment design to help align the MEA
electrode array to the camera and projector.
    It displays a grid of points that you can align to the real-world,
    camera-acquired picture of the electrode grid. See :mod:`~ceed.view.controller`
for more details.
"""
num_rows = NumericProperty(12)
"""Number of rows.
See :attr:`~ceed.view.controller.ViewControllerBase.mea_num_rows`
"""
num_cols = NumericProperty(12)
"""Number of columns.
See :attr:`~ceed.view.controller.ViewControllerBase.mea_num_cols`
"""
pitch = NumericProperty(20)
"""The distance in pixels between the rows/columns.
See :attr:`~ceed.view.controller.ViewControllerBase.mea_pitch`
"""
diameter = NumericProperty(3)
"""Diameter of each electrode circle in pixels..
See :attr:`~ceed.view.controller.ViewControllerBase.mea_diameter`
"""
show = BooleanProperty(False)
"""Whether the grid is currently shown.
"""
color = None
"""The grid color.
"""
label = None
"""The label that shows the "A1" corner electrode.
"""
label2 = None
"""The label that shows the "M1" corner electrode.
"""
def __init__(self, **kwargs):
super(MEAArrayAlign, self).__init__(**kwargs)
label = self.label = Factory.XYSizedLabel(text='A1')
self.add_widget(label)
label2 = self.label2 = Factory.XYSizedLabel(text='M1')
self.add_widget(label2)
self.fbind('num_rows', self.update_graphics)
self.fbind('num_cols', self.update_graphics)
self.fbind('pitch', self.update_graphics)
self.fbind('diameter', self.update_graphics)
self.update_graphics()
def track_show(*largs):
label.color = 1, 1, 1, (1 if self.show else 0)
label2.color = 1, 1, 1, (1 if self.show else 0)
self.fbind('show', track_show)
track_show()
def update_graphics(self, *largs):
"""Automatic callback that updates the graphics whenever any parameter
changes.
"""
self.canvas.remove_group('MEAArrayAlign')
pitch = self.pitch
radius = self.diameter / 2.0
with self.canvas:
self.color = Color(
1, 1, 1, 1 if self.show else 0, group='MEAArrayAlign')
for row in range(self.num_rows):
for col in range(self.num_cols):
Point(
points=[col * pitch, row * pitch], pointsize=radius,
group='MEAArrayAlign')
h = max((self.num_rows - 1) * pitch, 0)
w = max((self.num_cols - 1) * pitch, 0)
self.label.y = h
self.label2.y = 0
self.label2.right = self.label.right = w
self.size = w, h + 35
def on_touch_down(self, touch):
if not self.do_translation_x and \
not self.do_translation_y and \
not self.do_rotation and \
not self.do_scale:
return False
return super(MEAArrayAlign, self).on_touch_down(touch)
def on_touch_move(self, touch):
if not self.do_translation_x and \
not self.do_translation_y and \
not self.do_rotation and \
not self.do_scale:
return False
return super(MEAArrayAlign, self).on_touch_move(touch)
def on_touch_up(self, touch):
if not self.do_translation_x and \
not self.do_translation_y and \
not self.do_rotation and \
not self.do_scale:
return False
return super(MEAArrayAlign, self).on_touch_up(touch)
@staticmethod
def make_matrix(elems: List[List[float]]) -> Matrix:
"""Converts a matrix represented as a 2D list to a kivy Matrix.
"""
mat = Matrix()
mat.set(array=elems)
return mat
@staticmethod
def compare_mat(mat: Matrix, mat_list: List[List[float]]) -> bool:
"""Compares a matrix represented as a 2D list to a kivy Matrix object
and returns whether they are equivalent.
"""
return mat.tolist() == tuple(tuple(item) for item in mat_list)
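# A small commented sketch of the two static helpers above: a 4x4 identity
# written as a 2D list should round-trip through kivy's Matrix and compare
# equal (assuming kivy is available, as imported at the top of this module).
#
#     identity = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
#     mat = MEAArrayAlign.make_matrix(identity)
#     assert MEAArrayAlign.compare_mat(mat, identity)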
| matham/Ceed | ceed/view/view_widgets.py | Python | mit | 5,758 |
from waflib import Utils
from waflib.Configure import conf
from samba_utils import get_string
done = {}
@conf
def SAMBA_CHECK_PERL(conf, mandatory=True, version=(5,0,0)):
if "done" in done:
return
done["done"] = True
conf.find_program('perl', var='PERL', mandatory=mandatory)
conf.load('perl')
path_perl = conf.find_program('perl')
conf.env.PERL_SPECIFIED = (conf.env.PERL != path_perl)
conf.check_perl_version(version)
def read_perl_config_var(cmd):
output = Utils.cmd_output([conf.env.get_flat('PERL'), '-MConfig', '-e', cmd])
if not isinstance(output, str):
output = get_string(output)
return Utils.to_list(output)
def check_perl_config_var(var):
conf.start_msg("Checking for perl $Config{%s}:" % var)
try:
v = read_perl_config_var('print $Config{%s}' % var)[0]
conf.end_msg("'%s'" % (v), 'GREEN')
return v
except IndexError:
conf.end_msg(False, 'YELLOW')
pass
return None
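    # For illustration (output values hypothetical): on a typical Linux perl,
    # read_perl_config_var('print $Config{vendorprefix}') returns something
    # like ['/usr'], so the vendor install paths below are only trusted when
    # perl's vendor prefix matches this build's ${PREFIX}.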
vendor_prefix = check_perl_config_var('vendorprefix')
perl_arch_install_dir = None
if vendor_prefix == conf.env.PREFIX:
        perl_arch_install_dir = check_perl_config_var('vendorarch')
    if perl_arch_install_dir is None:
        perl_arch_install_dir = "${LIBDIR}/perl5"
conf.start_msg("PERL_ARCH_INSTALL_DIR: ")
conf.end_msg("'%s'" % (perl_arch_install_dir), 'GREEN')
conf.env.PERL_ARCH_INSTALL_DIR = perl_arch_install_dir
perl_lib_install_dir = None
if vendor_prefix == conf.env.PREFIX:
        perl_lib_install_dir = check_perl_config_var('vendorlib')
    if perl_lib_install_dir is None:
        perl_lib_install_dir = "${DATADIR}/perl5"
conf.start_msg("PERL_LIB_INSTALL_DIR: ")
conf.end_msg("'%s'" % (perl_lib_install_dir), 'GREEN')
conf.env.PERL_LIB_INSTALL_DIR = perl_lib_install_dir
perl_inc = read_perl_config_var('print "@INC"')
if '.' in perl_inc:
perl_inc.remove('.')
conf.start_msg("PERL_INC: ")
conf.end_msg("%s" % (perl_inc), 'GREEN')
conf.env.PERL_INC = perl_inc
| kernevil/samba | buildtools/wafsamba/samba_perl.py | Python | gpl-3.0 | 2,115 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_cmppolicy_binding(base_resource) :
""" Binding class showing the cmppolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._sc = ""
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke policies bound to a virtual server or policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke policies bound to a virtual server or policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def sc(self) :
ur"""Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_cmppolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_cmppolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbvserver_cmppolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_cmppolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_cmppolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_cmppolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch lbvserver_cmppolicy_binding resources.
"""
try :
obj = lbvserver_cmppolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of lbvserver_cmppolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_cmppolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count lbvserver_cmppolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_cmppolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of lbvserver_cmppolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_cmppolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_cmppolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_cmppolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_cmppolicy_binding = [lbvserver_cmppolicy_binding() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_cmppolicy_binding.py | Python | apache-2.0 | 10,917 |
from ..models import Developer
from os.path import dirname, join
from leancloud import Object
from application.common.util import post_panel_data
from flask import Blueprint, render_template, request, session, redirect, url_for, json
panel = Blueprint('panel', __name__, template_folder='templates')
Installation = Object.extend('BindingInstallation')
@panel.route('/panel/debug', methods=['GET', 'POST'])
def show():
if not session.get('session_token'):
next_url = '/panel/debug'
return redirect(url_for('accounts_bp.login') + '?next=' + next_url)
app_id = session.get('app_id', None)
developer = Developer()
developer.session_token = session.get('session_token')
username = developer.username()
app_list = developer.get_app_list()
tracker_list = developer.get_tracker_of_app(app_id)
s = json.load(file(join(dirname(dirname(__file__)), 'translate.json')))
home_office_type = s.get("home_office_status").keys() + s.get("home_office_status_old").keys()
if request.method == 'POST':
tracker = request.form.get('tracker')
motion_type = request.form.get('motionType')
motion_val = request.form.get('motionVal')
context_type = request.form.get('contextType')
context_val = request.form.get('contextVal')
source = request.form.get('source')
if motion_type and motion_val:
post_panel_data(tracker=tracker, tracker_list=tracker_list,
type="motion", value=motion_val, source=source)
if context_type and context_val and context_val in home_office_type:
post_panel_data(tracker=tracker, tracker_list=tracker_list,
type="home_office_status", value=context_val, source=source)
if context_type and context_val and context_val not in home_office_type:
post_panel_data(tracker=tracker, tracker_list=tracker_list,
type="event", value=context_val, source=source)
motion_list = ['motionSitting', 'motionWalking', 'motionRunning', 'motionBiking', 'motionCommuting']
f = file(join(dirname(dirname(__file__)), 'translate.json'))
context_list = filter(lambda x: str(x) != '', json.load(f).get('context').keys())
return render_template('panel/debug.html', username=username, motion_list=motion_list, context_list=context_list,
tracker_list=tracker_list, app_id=app_id, app_list=app_list)
| petchat/senz.dashboard.backend | application/views/panel.py | Python | mit | 2,465 |
import unittest
from ebird.api.validation import is_subnational2
class IsSubnational2Tests(unittest.TestCase):
"""Tests for the is_subnational2 validation function."""
def test_is_subnational2(self):
self.assertTrue(is_subnational2("US-NV-11"))
def test_invalid_code_is_not_subnational2(self):
self.assertFalse(is_subnational2("US-NV-"))
def test_country_is_not_subnational2(self):
self.assertFalse(is_subnational2("US"))
def test_subnational1_is_not_subnational2(self):
self.assertFalse(is_subnational2("US-NV"))
def test_location_is_not_subnational2(self):
self.assertFalse(is_subnational2("L123456"))
| ProjectBabbler/ebird-api | tests/validation/test_is_subnational2.py | Python | mit | 676 |
import unittest
from rest_framework_cache.utils import get_cache_key, get_all_cache_keys
from rest_framework_cache.registry import cache_registry
from .models import TestModel
from .serializers import TestSerializer
class GetCacheKeyTestCase(unittest.TestCase):
def test_ok(self):
instance = TestModel()
instance.id = 1000
serializer = TestSerializer()
key = get_cache_key(instance, serializer.__class__, 'http')
self.assertEqual(key, "http.tests.TestModel.TestSerializer:1000")
class GetAllCacheKeyTestCase(unittest.TestCase):
def setUp(self):
cache_registry.register(TestSerializer)
def test_ok(self):
instance = TestModel()
instance.id = 1000
keys = get_all_cache_keys(instance)
self.assertEqual(keys, ["http.tests.TestModel.TestSerializer:1000",
"https.tests.TestModel.TestSerializer:1000"])
| ervilis/django-rest-framework-cache | tests/tests_utils.py | Python | gpl-3.0 | 929 |
#!/usr/bin/env python3
"""
Created on 4 Mar 2019
@author: Bruno Beloff ([email protected])
"""
from scs_core.aqcsv.specification.mpc import MPC
from scs_core.data.json import JSONify
# --------------------------------------------------------------------------------------------------------------------
print("list...")
for mpc in MPC.instances():
print(mpc)
print("-")
print("find...")
code = 9
mpc = MPC.instance(code)
print("code:%s mpc:%s" % (code, mpc))
print("-")
code = 2
mpc = MPC.instance(code)
print("code:%s mpc:%s" % (code, mpc))
jdict = mpc.as_json()
print(JSONify.dumps(mpc))
print("-")
remade = MPC.construct_from_jdict(jdict)
print(remade)
equality = remade == mpc
print("remade == mpc: %s" % equality)
print("-")
| south-coast-science/scs_core | tests/aqcsv/specification/mpc_test.py | Python | mit | 761 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_net_path.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from v_net import incorporatePoints
def processCommand(alg):
incorporatePoints(alg)
| sebastic/QGIS | python/plugins/processing/algs/grass7/ext/v_net_path.py | Python | gpl-2.0 | 1,206 |
# -*- coding: utf-8 -*-
"""
@author: Tobias Krauss
"""
from lib.Instruction import Instruction
import lib.PseudoInstruction as PI
import lib.StartVal as SV
from lib.PseudoInstruction import (PseudoInstruction,
PseudoOperand)
from lib.Register import (get_reg_class,
get_size_by_reg,
get_reg_by_size)
def add_ret_pop(inst_lst):
"""
@brief converts the 'pop' instructions of 'vret'
to 'vpop' PseudoInstructions
@param inst_lst List of VmInstructions
@return List of PseudoInstructions
"""
#find ret
ret = []
for vinst in inst_lst:
if vinst.Pseudocode.inst_type == PI.RET_T:
for inst in vinst.all_instructions:
if inst.is_pop() and len(inst) != 1:
p_inst = PseudoInstruction('vpop', vinst.addr,
[make_op(inst, 1, -1)])
ret.append(p_inst)
elif inst.is_pop() and len(inst) == 1:
new_op = PseudoOperand(PI.REGISTER_T,
'flags',
SV.dissassm_type,
'flags')
p_inst = PseudoInstruction('vpopf', vinst.addr,
[new_op])
ret.append(p_inst)
ret.append(vinst.Pseudocode)
else:
ret.append(vinst.Pseudocode)
return ret
def to_vpush(p_lst, start_addr):
"""
@brief Converts the 'push' instructions at the beginning
of the virtual machine function to 'vpush' PseudoInstructions
@param p_lst List of instructions
@param start_addr Address where the PseudoInstruction must be
placed
@return List of PseudoInstructions
"""
ret = []
wrote_values = {}
for inst in p_lst:
if not inst.is_push():
if inst.is_mov():
wrote_values[inst.get_op_str(1)] = inst.get_op_str(2)
continue
print inst
if len(inst) != 1:
if inst.op_is_mem(1):
if inst.is_rip_rel():
disp = inst.get_op_disp(1)
disp += inst.addr + inst.opcode_len
new_op = PseudoOperand(PI.MEMORY_T,
'[{0:#x}]'.format(disp),
inst.get_op_size(1),
'', None)
else:
new_op = PseudoOperand(PI.MEMORY_T,
inst.get_op_str(1),
inst.get_op_size(1),
'', None)
ret.append(PseudoInstruction('vpush',
start_addr,
[new_op]))
elif inst.op_is_mem_abs(1):
new_op = PseudoOperand(PI.MEMORY_T,
inst.get_op_str(1),
inst.get_op_size(1),
'', None)
ret.append(PseudoInstruction('vpush',
start_addr,
[new_op]))
elif inst.op_is_reg(1):
wrote_value = False
if inst.get_op_str(1) in wrote_values:
new_op = PseudoOperand(PI.IMMEDIATE_T,
wrote_values[inst.get_op_str(1)],
inst.get_op_size(1),
int(wrote_values[inst.get_op_str(1)], 16))
ret.append(PseudoInstruction('vpush',
start_addr,
[new_op]))
else:
new_op = PseudoOperand(PI.REGISTER_T,
inst.get_op_str(1),
inst.get_op_size(1),
inst.get_reg_name(1))
ret.append(PseudoInstruction('vpush',
start_addr,
[new_op]))
elif inst.op_is_imm(1):
new_op = PseudoOperand(PI.IMMEDIATE_T,
inst.get_op_str(1),
inst.get_op_size(1), '')
ret.append(PseudoInstruction('vpush',
start_addr,
[new_op]))
else:
new_op = PseudoOperand(PI.REGISTER_T, 'flags',
SV.dissassm_type, 'flags')
p_inst = PseudoInstruction('vpushf', start_addr, [new_op])
ret.append(p_inst)
return ret
def make_op(inst, op, catch_value):
"""
@brief convert operands to PseudoOperands
@param inst Instruction with the Operand
@param op number of op; op = 1 for first operand
@param catch_value Value from the obfuscated code
@return PseudoOperand
"""
if(inst.get_op_str(op) == None):
return None
if inst.op_is_mem(op):
return PseudoOperand(PI.MEMORY_T, inst.get_op_str(op),
inst.get_op_size(op), inst.get_reg_name(op),
catch_value)
elif inst.op_is_reg(op):
return PseudoOperand(PI.REGISTER_T, inst.get_op_str(op),
inst.get_op_size(op), inst.get_reg_name(op))
elif inst.op_is_imm(op):
return PseudoOperand(PI.IMMEDIATE_T, inst.get_op_str(op),
inst.get_op_size(op), inst.get_op_value(op))
else:
return None
def extend_signed_catch_val(reg, catch_value):
"""
@brief Sign extends catch_value
@param register Register which contains the catch_value
@param catch_value Value catched form obfuscated code
@return Sign extended catch_value
"""
reg_size = get_size_by_reg(reg)
    if reg_size == 8 and catch_value > 0x7f:  # sign bit of the byte is set
if SV.dissassm_type == SV.ASSEMBLER_32:
catch_value = 0xffffff00 + catch_value
elif SV.dissassm_type == SV.ASSEMBLER_64:
catch_value = 0xffffffffffffff00 + catch_value
    elif reg_size == 16 and catch_value > 0x7fff:
if SV.dissassm_type == SV.ASSEMBLER_32:
catch_value = 0xffff0000 + catch_value
elif SV.dissassm_type == SV.ASSEMBLER_64:
catch_value = 0xffffffffffff0000 + catch_value
    elif reg_size == 32 and catch_value > 0x7fffffff:
#there is nothing to do for 32bit
if SV.dissassm_type == SV.ASSEMBLER_64:
catch_value = 0xffffffff00000000 + catch_value
#there is nothing to do for reg_size == 64
return catch_value
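# Worked example (assuming 32-bit disassembly): a catch value of 0xff in an
# 8-bit register encodes -1 in two's complement, so it extends to
# 0xffffff00 + 0xff == 0xffffffff, while a small positive byte such as 0x05
# stays below the sign-bit threshold and is returned unchanged.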
class VmInstruction(object):
"""
    @brief Converts the executed x86 code to the corresponding PseudoInstruction
"""
def __init__(self, instr_lst, catch_value, catch_reg, inst_addr):
"""
@param instr_lst List of x86 instructions
@param catch_value Value that is catched from the virtual code
or None if there is no value catched
@param catch_reg Register in which the catch_value is moved
@param inst_addr Address of the VmInstruction
"""
self.all_instructions = instr_lst
self.Vinstructions = []
self.Instructions = []
self.is_signed = False
for inst in instr_lst:
if inst.is_vinst():
self.Vinstructions.append(inst)
else:
self.Instructions.append(inst)
self.Pseudocode = None
self.catch_value = catch_value
self.catch_reg = catch_reg
self.addr = inst_addr
if not self.get_pseudo_code():
mnem_str = ''
for inst in self.all_instructions:
mnem_str += str(inst)
            self.Pseudocode = PI.PseudoInstruction(mnem_str, inst_addr, [], 0, PI.UNDEF_T)
print 'Did not find pseudocode at addr: {0:#x}'.format(inst_addr)
def __str__(self):
if self.Pseudocode is not None:
return str(self.Pseudocode)
else:
inst_str = ''
for item in self.all_instructions:
inst_str = inst_str + str(item) + '\n'
return inst_str
def replace_catch_reg(self):
"""
@brief replace the catch_register with its catch_value
"""
if (self.catch_reg == ''):
return
if self.is_signed:
self.catch_value = extend_signed_catch_val(self.catch_reg, self.catch_value)
self.Pseudocode.replace_reg_class(self.catch_reg, self.catch_value)
def get_pseudo_code(self):
"""
@brief tests if its a known VmInstruction
@remark Those tests set the Pseudocode variable with the
corresponding PseudoInstruction
"""
if (self.is_push() or
self.is_pop()):
self.replace_catch_reg()
return True
elif (self.is_nor() or
self.is_add() or
self.is_jmp() or
self.is_write() or
self.is_read() or
self.is_shift_right() or
self.is_shift_left() or
self.is_shld() or
self.is_shrd() or
self.is_vcall() or
self.is_mov_ebp() or
self.is_vret() or
self.is_imul() or
self.is_idiv()):
return True
else:
return False
###########################
# helper functions #
###########################
def get_previous(self, method, pos):
"""
@brief Find previous instruction for which method evaluates True
@param method Evaluation method
@param pos Last position
"""
pos_lst = []
for prev_pos, inst in enumerate(self.Instructions):
if (prev_pos < pos) and method(inst):
pos_lst.append(prev_pos)
return pos_lst
def get_subsequent(self, method, pos):
"""
@brief Find subsequent instruction for which method evaluates True
@param method Evaluation method
@param pos First position
"""
pos_lst = []
for subs_pos, inst in enumerate(self.Instructions):
if (subs_pos > pos) and method(inst):
pos_lst.append(subs_pos)
return pos_lst
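    # For example, self.get_previous(Instruction.is_mov, 5) collects the
    # positions of every mov among self.Instructions[0:5] in ascending order,
    # and get_subsequent does the same for positions after 5.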
########################
# decision functions #
########################
def is_push(self):
"""
@brief Tests if the VmInstruction is a 'vpush'.
If True sets the PseudoInstruction
"""
for pos, inst in enumerate(self.Instructions):
if(inst.is_sub_basepointer()):
break
if(get_reg_class(self.catch_reg) == get_reg_class('eax') and
(inst.is_cwde() or inst.is_cbw() or inst.is_cdqe())):
self.is_signed = True
else : # no break
return False
pos_pmov_lst = self.get_subsequent(Instruction.is_write_stack, pos)
if len(pos_pmov_lst) != 1:
return False
push_inst = self.Instructions[pos_pmov_lst[0]]
pos_mov_lst = self.get_previous(Instruction.is_mov, pos)
push_op = make_op(push_inst, 2, self.catch_value)
for pos_mov in pos_mov_lst:
pos_mov_inst = self.Instructions[pos_mov]
if pos_mov_inst.is_read_stack():
return False
if((get_reg_class(push_inst.get_op_str(2)) ==
get_reg_class(pos_mov_inst.get_op_str(1))) and
get_reg_class(push_inst.get_op_str(2)) != None): # too strong condition
push_op = make_op(pos_mov_inst, 2, self.catch_value)
sub_value = self.Instructions[pos].get_op_value(2)
self.Pseudocode = PseudoInstruction('vpush', self.addr, [push_op], sub_value)
return True
# control in comp.vmp loc4041c8
# size von holen und add sub gleich?
def is_pop(self):
"""
@brief Tests if the VmInstruction is a 'vpop'.
If True sets the PseudoInstruction
"""
for pos, inst in enumerate(self.Instructions):
if(inst.is_add_basepointer()):
break
else : # no break
return False
pos_pmov_lst = self.get_previous(Instruction.is_read_stack, pos)
if len(pos_pmov_lst) == 0:
return False
for ppos in pos_pmov_lst:
pop_inst = self.Instructions[ppos] # get last pop_mov inst in case there are more
pop_op = make_op(pop_inst, 1, self.catch_value)
pos_mov_lst = self.get_subsequent(Instruction.is_mov, pos)
op_pos = ppos
for pos_mov in pos_mov_lst:
pos_mov_inst = self.Instructions[pos_mov]
if(pos_mov_inst.is_write_stack()):
return False
if((get_reg_class(pop_inst.get_op_str(1)) ==
get_reg_class(pos_mov_inst.get_op_str(2))) and
get_reg_class(pop_inst.get_op_str(1))): #maybe too weak
pop_op = make_op(pos_mov_inst, 1, self.catch_value)
op_pos = pos_mov
if(not self.Instructions[op_pos].op_is_mem(1)):
return False
add_value = self.Instructions[pos].get_op_value(2)
self.Pseudocode = PseudoInstruction('vpop', self.addr,
[pop_op], add_value)
#print 'vpop'
return True
#TODO add with two regs
def is_add(self):
"""
@brief Tests if the VmInstruction is a 'vadd'.
If True sets the PseudoInstruction
"""
for pos, inst in enumerate(self.Instructions):
if(inst.is_add() and not inst.op_is_imm(2)):
break
else: # no break
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
# mit opstr?
opstr = self.Instructions[pos].get_op_str(2)
for pos0 in pos_mov:
if opstr == self.Instructions[pos0].get_op_str(1):
self.Pseudocode = PseudoInstruction('vadd', self.addr,
[make_op(self.Instructions[pos], 1, self.catch_value),
make_op(self.Instructions[pos0], 2, self.catch_value)], SV.dissassm_type / 8)
break
else:
return False
return True
def is_nor(self):
"""
@brief Tests if the VmInstruction is a 'vnor'.
If True sets the PseudoInstruction
"""
# 1. search for and with 2 different registers
and_found = False
reg0 = ''
reg1 = ''
and_size = 0
for pos, inst in enumerate(self.Instructions):
if inst.is_and():
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
and_size = inst.get_mov_size()
if reg0 != reg1:
and_found = True
break
if not and_found:
return False
pos_not = self.get_previous(Instruction.is_not, pos)
#if len(pos_not) < 1 or len(pos_not) > 2:
# return False
not_size = 0
for posn in pos_not:
not_size += (self.Instructions[posn].Instruction.operands[0].size / 8)
if(not_size != 2 * and_size):
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
#if len(pos_mov) != 2:
# return False
mov_r0 = False
mov_r1 = False
op1 = make_op(self.Instructions[pos], 1, self.catch_value)
op2 = make_op(self.Instructions[pos], 2, self.catch_value)
for pos_reg0 in pos_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[pos_reg0].get_reg_name(1))):
mov_r0 = True
break
for pos_reg1 in pos_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[pos_reg1].get_reg_name(1))):
mov_r1 = True
break
if mov_r0:
op1 = make_op(self.Instructions[pos_reg0], 2, self.catch_value)
if mov_r1:
op2 = make_op(self.Instructions[pos_reg1], 2, self.catch_value)
#quick fix correct !!!
if(op1.register == 'ebp') and (and_size == 2):
op1 = op1.replace('+0x4', '+0x2')
self.Pseudocode = PseudoInstruction('vnor', self.addr, [op1, op2], and_size)
return True
def is_jmp(self):
"""
@brief Tests if the VmInstruction is a 'vjmp'.
If True sets the PseudoInstruction
"""
for pos, inst in enumerate(self.all_instructions):
if(inst.is_add_basepointer()):
break
else : # no break
return False
prev_pos = 0
while prev_pos < pos:
if self.all_instructions[prev_pos].is_isp_mov():
break
prev_pos = prev_pos + 1
else: # no break
return False
add_value = self.all_instructions[pos].get_op_value(2)
self.Pseudocode = PseudoInstruction(
'vjmp', self.addr,
[make_op(self.all_instructions[prev_pos], 2, self.catch_value)], add_value)
return True
def is_write(self):
"""
@brief Tests if the VmInstruction is a 'vwrite'.
If True sets the PseudoInstruction
"""
reg0 = ''
reg1 = ''
mov_size = 0
sub_size = 0
for pos, inst in enumerate(self.all_instructions):
if inst.op_is_mem(1) and not inst.is_write_stack():
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
mov_size = inst.get_mov_size()
break
else: # no break
return False
for subpos, inst in enumerate(self.Instructions):
if(inst.is_add_basepointer()):
sub_size = inst.get_op_value(2)
break
else : # no break
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
mov_r0 = False
mov_r1 = False
for pos_reg0 in pos_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[pos_reg0].get_reg_name(1))):
mov_r0 = True
break
for pos_reg1 in pos_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[pos_reg1].get_reg_name(1))):
mov_r1 = True
break
if mov_r0 and mov_r1:
op1_inst = self.Instructions[pos_reg0]
op1 = PseudoOperand(PI.REFERENCE_T, op1_inst.get_op_str(2),
op1_inst.get_op_size(2), op1_inst.get_reg_name(2))
op2 = make_op(self.Instructions[pos_reg1], 2, self.catch_value)
self.Pseudocode = PseudoInstruction('vwrite', self.addr,
[op1, op2], mov_size, PI.WRITE_T, PI.IN2_OUT0, sub_size)
return True
else:
return False
def is_read(self):
"""
@brief Tests if the VmInstruction is a 'vread'.
If True sets the PseudoInstruction
"""
reg0 = ''
reg1 = ''
mov_size = 0
for pos, inst in enumerate(self.all_instructions):
if inst.op_is_mem(2) and not inst.is_read_stack():
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
mov_size = inst.get_mov_size()
break
else: # no break
return False
prev_mov = self.get_previous(Instruction.is_mov, pos)
post_mov = self.get_subsequent(Instruction.is_mov, pos)
for prev_pos in prev_mov:
if(get_reg_class(reg1) ==
get_reg_class(self.Instructions[prev_pos].get_reg_name(1))):
break
else: # no break
return False
for post_pos in post_mov:
if(get_reg_class(reg0) ==
get_reg_class(self.Instructions[post_pos].get_reg_name(2))):
push_size = self.Instructions[post_pos].get_mov_size()
break
else: # no break
return False
# wta = write to address
#if mov_size == 1:
op1 = make_op(self.Instructions[post_pos], 1, self.catch_value)
op2_inst = self.Instructions[prev_pos]
op2 = PseudoOperand(PI.REFERENCE_T, op2_inst.get_op_str(2),
op2_inst.get_op_size(2), op2_inst.get_reg_name(2))
self.Pseudocode = PseudoInstruction('vread', self.addr,
[op1, op2], mov_size, PI.READ_T, PI.IN1_OUT1 , push_size)
return True
def is_shift_right(self):
"""
@brief Tests if the VmInstruction is a 'vshr'.
If True sets the PseudoInstruction
"""
# 1. search for and with 2 different registers
and_found = False
reg0 = ''
reg1 = ''
for pos, inst in enumerate(self.Instructions):
if inst.is_shr() and inst.op_is_reg(1) and inst.op_is_reg(2):
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
if reg0 != reg1:
and_found = True
break
if not and_found:
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
if len(pos_mov) != 2:
return False
mov_r0 = False
mov_r1 = False
for pos_reg0 in pos_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[pos_reg0].get_reg_name(1))):
mov_r0 = True
break
for pos_reg1 in pos_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[pos_reg1].get_reg_name(1))):
mov_r1 = True
break
post_mov = self.get_subsequent(Instruction.is_mov, pos)
for save_mov in post_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[save_mov].get_reg_name(2))):
ret_size = self.Instructions[save_mov].get_mov_size()
break
else: # no break
return False
if mov_r0 and mov_r1:
# TODO byte word usw...
self.Pseudocode = PseudoInstruction('vshr', self.addr,
[make_op(self.Instructions[pos_reg0], 2, self.catch_value),
make_op(self.Instructions[pos_reg1], 2, self.catch_value)],
ret_size)
return True
else:
return False
def is_shift_left(self):
"""
@brief Tests if the VmInstruction is a 'vshl'.
If True sets the PseudoInstruction
"""
# 1. search for and with 2 different registers
and_found = False
reg0 = ''
reg1 = ''
for pos, inst in enumerate(self.Instructions):
if inst.is_shl() and inst.op_is_reg(1) and inst.op_is_reg(2):
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
if reg0 != reg1:
and_found = True
break
if not and_found:
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
if len(pos_mov) != 2:
return False
mov_r0 = False
mov_r1 = False
for pos_reg0 in pos_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[pos_reg0].get_reg_name(1))):
mov_r0 = True
break
for pos_reg1 in pos_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[pos_reg1].get_reg_name(1))):
mov_r1 = True
break
post_mov = self.get_subsequent(Instruction.is_mov, pos)
for save_mov in post_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[save_mov].get_reg_name(2))):
ret_size = self.Instructions[save_mov].get_mov_size()
break
else: # no break
return False
if mov_r0 and mov_r1:
# TODO byte word usw...
self.Pseudocode = PseudoInstruction('vshl', self.addr,
[make_op(self.Instructions[pos_reg0], 2, self.catch_value),
make_op(self.Instructions[pos_reg1], 2, self.catch_value)],
ret_size)
return True
else:
return False
def is_shrd(self):
"""
@brief Tests if the VmInstruction is a 'vshrd'.
If True sets the PseudoInstruction
"""
and_found = False
reg0 = ''
reg1 = ''
reg2 = ''
for pos, inst in enumerate(self.Instructions):
if (inst.is_shrd() and inst.op_is_reg(1) and inst.op_is_reg(2)
and inst.op_is_reg(3)):
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
reg2 = inst.get_reg_name(3)
if reg0 != reg1:
and_found = True
break
if not and_found:
return False
prev_mov = self.get_previous(Instruction.is_mov, pos)
for prev_pos0 in prev_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[prev_pos0].get_reg_name(1))):
break
else: # no break
return False
for prev_pos1 in prev_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[prev_pos1].get_reg_name(1))):
break
else: # no break
return False
for prev_pos2 in prev_mov:
if (get_reg_class(reg2) ==
get_reg_class(self.Instructions[prev_pos2].get_reg_name(1))):
break
else: # no break
return False
self.Pseudocode = PseudoInstruction('vshrd', self.addr,
[make_op(self.Instructions[prev_pos0], 2, self.catch_value),
make_op(self.Instructions[prev_pos1], 2, self.catch_value),
make_op(self.Instructions[prev_pos2], 2, self.catch_value)])
return True
def is_shld(self):
"""
@brief Tests if the VmInstruction is a 'vshld'.
If True sets the PseudoInstruction
"""
and_found = False
reg0 = ''
reg1 = ''
reg2 = ''
for pos, inst in enumerate(self.Instructions):
if (inst.is_shld() and inst.op_is_reg(1) and inst.op_is_reg(2)
and inst.op_is_reg(3)):
reg0 = inst.get_reg_name(1)
reg1 = inst.get_reg_name(2)
reg2 = inst.get_reg_name(3)
if reg0 != reg1:
and_found = True
break
if not and_found:
return False
prev_mov = self.get_previous(Instruction.is_mov, pos)
for prev_pos0 in prev_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[prev_pos0].get_reg_name(1))):
break
else: # no break
return False
for prev_pos1 in prev_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[prev_pos1].get_reg_name(1))):
break
else: # no break
return False
for prev_pos2 in prev_mov:
if (get_reg_class(reg2) ==
get_reg_class(self.Instructions[prev_pos2].get_reg_name(1))):
break
else: # no break
return False
self.Pseudocode = PseudoInstruction('vshld', self.addr,
[make_op(self.Instructions[prev_pos0], 2, self.catch_value),
make_op(self.Instructions[prev_pos1], 2, self.catch_value),
make_op(self.Instructions[prev_pos2], 2, self.catch_value)])
return True
def is_vcall(self):
"""
@brief Tests if the VmInstruction is a 'vcall'.
If True sets the PseudoInstruction
"""
for pos, inst in enumerate(self.Instructions):
if(inst.is_call()):
break
else : # no break
return False
op1 = self.Instructions[pos].get_op_str(1)
prev_mov = self.get_previous(Instruction.is_mov, pos)
for prev_pos in prev_mov:
if (get_reg_class(self.Instructions[pos].get_reg_name(1)) ==
get_reg_class(self.Instructions[prev_pos].get_reg_name(1))):
op1 = make_op(self.Instructions[prev_pos], 2, self.catch_value)
self.Pseudocode = PseudoInstruction('vcall', self.addr, [op1])
return True
def is_vret(self):
"""
@brief Tests if the VmInstruction is a 'vret'.
If True sets the PseudoInstruction
"""
for pos, inst in enumerate(self.Instructions):
if(inst.is_ret()):
break
else : # no break
return False
self.Pseudocode = PseudoInstruction('vret', self.addr)
return True
def is_mov_ebp(self):
"""
@brief Tests if the VmInstruction is a 'vebp_mov'.
If True sets the PseudoInstruction
"""
op1 = ''
op2 = ''
for pos, inst in enumerate(self.Instructions):
if(inst.is_mov() and
get_reg_class(inst.get_reg_name(1)) == get_reg_class('ebp') and
get_reg_class(inst.get_reg_name(2)) == get_reg_class('ebp')):
op1 = make_op(inst, 1, self.catch_value)
op2 = make_op(inst, 2, self.catch_value)
break
else : # no break
return False
self.Pseudocode = PseudoInstruction('vebp_mov', self.addr, [op1, op2])
return True
def is_imul(self):
"""
@brief Tests if the VmInstruction is a 'vimul'.
If True sets the PseudoInstruction
"""
reg0 = ''
reg1 = ''
mul_found = False
for pos, inst in enumerate(self.Instructions):
if (inst.is_imul() and inst.op_is_reg(1)):
reg0 = inst.get_reg_name(1)
if inst.get_reg_name(2) == None:
reg1 = get_reg_by_size(get_reg_class('eax'), SV.dissassm_type)
else:
reg1 = inst.get_reg_name(2)
if reg0 != reg1:
mul_found = True
break
if not mul_found:
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
for pos_reg0 in pos_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[pos_reg0].get_reg_name(1))):
mov_r0 = True
break
for pos_reg1 in pos_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[pos_reg1].get_reg_name(1))):
mov_r1 = True
break
if mov_r0 and mov_r1:
self.Pseudocode = PseudoInstruction('vimul', self.addr,
[make_op(self.Instructions[pos_reg0], 2, self.catch_value),
make_op(self.Instructions[pos_reg1], 2, self.catch_value)],
SV.dissassm_type / 8, PI.IMUL_T, PI.IN2_OUT3)
return True
else:
return False
def is_idiv(self):
"""
        @brief Tests if the VmInstruction is a 'vidiv'.
If True sets the PseudoInstruction
"""
reg0 = ''
reg1 = ''
op_name = ''
div_found = False
for pos, inst in enumerate(self.Instructions):
if (inst.is_idiv()):
reg0 = get_reg_by_size(get_reg_class('eax'), SV.dissassm_type)
reg1 = get_reg_by_size(get_reg_class('edx'), SV.dissassm_type)
op_name = inst.get_op_str(1)
div_found = True
if not div_found:
return False
pos_mov = self.get_previous(Instruction.is_mov, pos)
for pos_reg0 in pos_mov:
if (get_reg_class(reg0) ==
get_reg_class(self.Instructions[pos_reg0].get_reg_name(1))):
mov_r0 = True
break
for pos_reg1 in pos_mov:
if (get_reg_class(reg1) ==
get_reg_class(self.Instructions[pos_reg1].get_reg_name(1))):
mov_r1 = True
break
if mov_r0 and mov_r1:
self.Pseudocode = PseudoInstruction('vidiv', self.addr,
[make_op(self.Instructions[pos_reg0], 2, self.catch_value),
make_op(self.Instructions[pos_reg1], 2, self.catch_value),
make_op(self.Instructions[pos], 1, self.catch_value)],
SV.dissassm_type / 8, PI.DIV_T, PI.IN3_OUT3)
return True
else:
            return False
| anatolikalysch/VMAttack | lib/VmInstruction.py | Python | mit | 33,455 |
__all__ = ["Capabilities", \
"Switches"]
from browser.status import *
from base.bind import Bind
from base.log import VLOG
class Switches(object):
def __init__(self):
self.switch_map = {}
def SetSwitch(self, name, value=""):
self.switch_map[name] = value
# In case of same key, |switches| will override.
def SetFromSwitches(self, switches):
for key, value in switches.switch_map.iteritems():
self.switch_map[key] = value
# Sets a switch from the capabilities, of the form [--]name[=value].
    def SetUnparsedSwitch(self, unparsed_switch):
        value = ""
        equals_index = unparsed_switch.find('=')
        if equals_index != -1:
            value = unparsed_switch[equals_index + 1:]
        start_index = 0
        if unparsed_switch[:2] == "--":
            start_index = 2
        if equals_index == -1:
            name = unparsed_switch[start_index:]
        else:
            name = unparsed_switch[start_index:equals_index]
        self.SetSwitch(name, value)
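    # For example (switch strings hypothetical): both
    # "--proxy-server=localhost:8080" and "no-sandbox" parse here, leaving
    # switch_map == {"proxy-server": "localhost:8080", "no-sandbox": ""}.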
def RemoveSwitch(self, name):
del self.switch_map[name]
def HasSwitch(self, name):
        return self.switch_map.has_key(name)
def GetSwitchValue(self, name):
if name not in self.switch_map:
return ""
return self.switch_map[name]
def GetSwitchValueNative(self, name):
if name not in self.switch_map:
return ""
return self.switch_map[name]
def GetSize(self):
return len(self.switch_map)
def ToString(self):
string = ""
for key, value in self.switch_map.iteritems():
string += "--" + key
if len(value):
if value.find(" ") != -1:
value = "true"
string += "=" + value + " "
return string
def ParseBoolean(member, option, capabilities):
if type(option) != bool:
return Status(kUnknownError, "must be a boolean")
elif not hasattr(capabilities, member):
return Status(kUnknownError, "has no such member variety")
else:
setattr(capabilities, member, option)
return Status(kOk)
def ParseString(member, option, capabilities):
if type(option) != str and type(option) != unicode:
return Status(kUnknownError, "must be a string")
elif not option:
return Status(kUnknownError, "cannot be empty")
elif not hasattr(capabilities, member):
return Status(kUnknownError, "has no such member variety")
else:
setattr(capabilities, member, option)
return Status(kOk)
def IgnoreCapability(option, capabilities):
return Status(kOk)
def ParseTizenXwalk(option, capabilities):
if type(option) != str and type(option) != unicode:
return Status(kUnknownError, "must be 'host:port'")
values = option.split(":")
if len(values) != 2:
return Status(kUnknownError, "must be 'host:port'")
port = int(values[1])
if port <= 0:
return Status(kUnknownError, "port must be > 0")
# TODO: I make debugger_address equal to "host:port" in string type
capabilities.tizen_debugger_address = option
return Status(kOk)
def ParseUseExistingBrowser(option, capabilities):
if type(option) != str and type(option) != unicode:
return Status(kUnknownError, "must be 'host:port'")
values = option.split(":")
if len(values) != 2:
return Status(kUnknownError, "must be 'host:port'")
port = int(values[1])
if port <= 0:
return Status(kUnknownError, "port must be > 0")
# TODO: I make debugger_address equal to "host:port" in string type
capabilities.debugger_address = option
return Status(kOk)
def ParseFilePath(member, option, capabilities):
if type(option) != str and type(option) != unicode:
return Status(kUnknownError, "must be a string")
elif not hasattr(capabilities, member):
return Status(kUnknownError, "has no such member variety")
else:
setattr(capabilities, member, option)
return Status(kOk)
def ParseDict(member, option, capabilities):
if type(option) != dict:
return Status(kUnknownError, "must be a dictionary")
elif not hasattr(capabilities, member):
return Status(kUnknownError, "has no such member variety")
else:
setattr(capabilities, member, option)
return Status(kOk)
def ParseLogPath(option, capabilities):
if type(option) != str and type(option) != unicode:
return Status(kUnknownError, "must be a string")
else:
capabilities.log_path = option
return Status(kOk)
def ParseExtensions(option, capabilities):
if type(option) != list:
return Status(kUnknownError, "must be a list")
for extension in option:
if type(extension) != str and type(extension) != unicode:
            return Status(kUnknownError, "each extension must be a base64 encoded string")
capabilities.extensions.append(extension)
return Status(kOk)
def IgnoreDeprecatedOption(option_name, option, capabilities):
VLOG(2, "Deprecated xwalk option is ignored: " + option)
return Status(kOk)
def ParseExcludeSwitches(option, capabilities):
if type(option) != list:
return Status(kUnknownError, "must be a list")
for switch_name in option:
if type(switch_name) != str and type(switch_name) != unicode:
return Status(kUnknownError, "each switch to be removed must be a string")
capabilities.exclude_switches.add(switch_name)
return Status(kOk)
def ParseSwitches(option, capabilities):
if type(option) != list:
return Status(kUnknownError, "must be a list")
for arg_string in option:
if type(arg_string) != str and type(arg_string) != unicode:
return Status(kUnknownError, "each argument must be a string")
capabilities.switches.SetUnparsedSwitch(arg_string);
return Status(kOk)
def ParseXwalkOptions(capability, capabilities):
if type(capability) != dict:
return Status(kUnknownError, "must be a dictionary")
is_android = capability.has_key("androidPackage")
is_existing = capability.has_key("debuggerAddress")
is_tizen = capability.has_key("tizenDebuggerAddress")
parser_map = {}
# Ignore 'args', 'binary' and 'extensions' capabilities by default, since the
# Java client always passes them.
parser_map["args"] = Bind(IgnoreCapability)
parser_map["binary"] = Bind(IgnoreCapability)
parser_map["extensions"] = Bind(IgnoreCapability)
if is_android:
parser_map["androidActivity"] = Bind(ParseString, ["android_activity", capability.get("androidActivity"), capabilities])
parser_map["androidDeviceSerial"] = Bind(ParseString, ["android_device_serial", capability.get("androidDeviceSerial"), capabilities])
parser_map["androidPackage"] = Bind(ParseString, ["android_package", capability.get("androidPackage"), capabilities])
parser_map["androidProcess"] = Bind(ParseString, ["android_process", capability.get("androidProcess"), capabilities])
parser_map["androidUseRunningApp"] = Bind(ParseBoolean, ["android_use_running_app", capability.get("androidUseRunningApp"), capabilities])
parser_map["args"] = Bind(ParseSwitches, [capability.get("args"), capabilities])
parser_map["loadAsync"] = Bind(IgnoreDeprecatedOption, ["loadAsync", capability.get("loadAsync"), capabilities])
elif is_tizen:
parser_map["tizenDebuggerAddress"] = Bind(ParseTizenXwalk, [capability.get("tizenDebuggerAddress"), capabilities])
parser_map["tizenAppId"] = Bind(ParseString, ["tizen_app_id", capability.get("tizenAppId"), capabilities])
parser_map["tizenAppName"] = Bind(ParseString, ["tizen_app_name", capability.get("tizenAppName"), capabilities])
parser_map["tizenDeviceSerial"] = Bind(ParseString, ["tizen_device_serial", capability.get("tizenDeviceSerial"), capabilities])
parser_map["tizenUseRunningApp"] = Bind(ParseBoolean, ["tizen_use_running_app", capability.get("tizenUseRunningApp"), capabilities])
elif is_existing:
parser_map["debuggerAddress"] = Bind(ParseUseExistingBrowser, [capability.get("debuggerAddress"), capabilities])
else:
parser_map["args"] = Bind(ParseSwitches, [capability.get("args"), capabilities])
parser_map["binary"] = Bind(ParseFilePath, ["binary", capability.get("binary"), capabilities])
parser_map["detach"] = Bind(ParseBoolean, ["detach", capability.get("detach"), capabilities])
parser_map["excludeSwitches"] = Bind(ParseExcludeSwitches, [capability.get("excludeSwitches"), capabilities])
parser_map["extensions"] = Bind(ParseExtensions, [capability.get("extensions"), capabilities])
parser_map["forceDevToolsScreenshot"] = Bind(ParseBoolean, ["force_devtools_screenshot", capability.get("forceDevToolsScreenshot"), capabilities])
parser_map["loadAsync"] = Bind(IgnoreDeprecatedOption, ["loadAsync", capability.get("loadAsync"), capabilities])
parser_map["localState"] = Bind(ParseDict, ["local_state", capability.get("localState"), capabilities])
parser_map["logPath"] = Bind(ParseLogPath, [capability.get("logPath"), capabilities])
parser_map["minidumpPath"] = Bind(ParseString, ["minidump_path", capability.get("minidumpPath"), capabilities])
parser_map["prefs"] = Bind(ParseDict, ["prefs", capability.get("prefs"), capabilities])
for key, value in capability.iteritems():
if capability.get(key, None) != None:
status = parser_map[key].Run()
if status.IsError():
VLOG(0, "error parse xwalk option: " + key)
return Status(kUnknownError, "cannot parse " + key)
return Status(kOk)
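# For illustration, a desired-capabilities fragment (all values hypothetical)
# that the default branch of this parser accepts:
#
#     {"xwalkOptions": {"binary": "/usr/bin/xwalk",
#                       "args": ["--ignore-gpu-blacklist", "window-size=800,600"],
#                       "detach": True}}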
def ParseProxy(option, capabilities):
proxy_dict = option
if type(proxy_dict) != dict:
return Status(kUnknownError, "must be a dictionary")
proxy_type = proxy_dict.get("proxyType")
#if type(proxy_type) != str and type(proxy_type) != unicode:
if type(proxy_type) != str:
return Status(kUnknownError, "'proxyType' must be a string")
    proxy_type = proxy_type.lower()
if proxy_type == "direct":
capabilities.switches.SetSwitch("no-proxy-server")
elif proxy_type == "system":
# Xwalk default.
pass
elif proxy_type == "pac":
proxy_pac_url = proxy_dict.get("proxyAutoconfigUrl")
        if not isinstance(proxy_pac_url, basestring):
            return Status(kUnknownError, "'proxyAutoconfigUrl' must be a string")
capabilities.switches.SetSwitch("proxy-pac-url", proxy_pac_url)
elif proxy_type == "autodetect":
capabilities.switches.SetSwitch("proxy-auto-detect")
elif proxy_type == "manual":
proxy_servers_options = [
["ftpProxy", "ftp"], ["httpProxy", "http"], ["sslProxy", "https"]]
        proxy_servers = ""
        for item in proxy_servers_options:
            option_value = proxy_dict.get(item[0])
            if option_value is None:
                continue
            if not isinstance(option_value, basestring):
                return Status(kUnknownError, item[0] + " must be a string")
            # Convert into the Xwalk proxy scheme.
            # Example: "http=localhost:9000;ftp=localhost:8000".
            if proxy_servers:
                proxy_servers += ";"
            proxy_servers += item[1] + "=" + option_value
        proxy_bypass_list = ""
        option_value = proxy_dict.get("noProxy")
        if option_value is not None:
            proxy_bypass_list = option_value
        if not isinstance(proxy_bypass_list, basestring):
            return Status(kUnknownError, "'noProxy' must be a string")
if not proxy_servers and not proxy_bypass_list:
return Status(kUnknownError, "proxyType is 'manual' but no manual proxy capabilities were found")
if proxy_servers:
capabilities.switches.SetSwitch("proxy-server", proxy_servers)
if proxy_bypass_list:
capabilities.switches.SetSwitch("proxy-bypass-list", proxy_bypass_list)
else:
        return Status(kUnknownError, "unrecognized proxy type: " + proxy_type)
return Status(kOk)
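# Illustrative sketch (editorial, not part of the original module): how a
# standard WebDriver "proxy" capability maps onto Xwalk switches under the
# parsing above. Note the ftp/http/https ordering follows
# proxy_servers_options.
#
#   caps = Capabilities()
#   ParseProxy({"proxyType": "manual",
#               "httpProxy": "localhost:9000",
#               "ftpProxy": "localhost:8000",
#               "noProxy": "127.0.0.1"}, caps)
#   # caps.switches now carries:
#   #   proxy-server="ftp=localhost:8000;http=localhost:9000"
#   #   proxy-bypass-list="127.0.0.1"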
class Capabilities(object):
def __init__(self):
self.android_activity = ""
self.android_device_serial = ""
self.android_package = ""
self.android_process = ""
        self.android_use_running_app = False
self.tizen_debugger_address = None
self.tizen_app_id = ""
self.tizen_app_name = ""
self.tizen_device_serial = ""
self.tizen_use_running_app = False
self.binary = ""
# If provided, the remote debugging address to connect to.
self.debugger_address = None
# Whether the lifetime of the started Xwalk browser process should be
# bound to XwalkDriver's process. If true, Xwalk will not quit if
# XwalkDriver dies.
self.detach = False
# Set of switches which should be removed from default list when launching
# Xwalk.
self.exclude_switches = set()
self.extensions = []
# True if should always use DevTools for taking screenshots.
# This is experimental and may be removed at a later point.
self.force_devtools_screenshot = False
self.local_state = {}
self.log_path = ""
self.logging_prefs = {}
# If set, enable minidump for xwalk crashes and save to this directory.
self.minidump_path = ""
self.prefs = {}
self.switches = Switches()
    # Return true if an existing host:port session is to be used.
    def IsExistingBrowser(self):
        return (self.debugger_address is not None and
                0 < self.debugger_address < 65536)
# Return true if android package is specified.
def IsAndroid(self):
return self.android_package != ""
    # Return true if a tizen debugger address is specified.
    def IsTizen(self):
        return (self.tizen_debugger_address is not None and
                0 < self.tizen_debugger_address < 65536)
    def Parse(self, desired_caps):
        parser_map = {}
        if desired_caps.get("xwalkOptions") is not None:
            parser_map["xwalkOptions"] = Bind(ParseXwalkOptions, [desired_caps["xwalkOptions"], self])
        if desired_caps.get("loggingPrefs") is not None:
            parser_map["loggingPrefs"] = Bind(ParseLoggingPrefs, [desired_caps["loggingPrefs"], self])
        if desired_caps.get("proxy") is not None:
            # Store under the "proxy" key; assigning to parser_map itself
            # would discard the other parsers.
            parser_map["proxy"] = Bind(ParseProxy, [desired_caps["proxy"], self])
for label, cmd in parser_map.iteritems():
status = cmd.Run()
if status.IsError():
return Status(kUnknownError, "cannot parse capability: " + label)
return Status(kOk)
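# Usage sketch (editorial): parsing a desiredCapabilities payload as received
# from a WebDriver client; Status, Bind and the parser functions are the ones
# defined in this module.
#
#   caps = Capabilities()
#   status = caps.Parse({"xwalkOptions": {"args": ["--ignore-gpu-blacklist"]},
#                        "proxy": {"proxyType": "direct"}})
#   if status.IsError():
#       ...  # reject the session request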
| PeterWangIntel/crosswalk-webdriver-python | misc/capabilities.py | Python | bsd-3-clause | 13,867 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Now with 30% more starch.
"""
from __future__ import generators
import hmac
from zope import interface
from twisted.trial import unittest
from twisted.cred import portal, checkers, credentials, error
from twisted.python import components
from twisted.python import util
from twisted.internet import defer
from twisted.internet.defer import deferredGenerator as dG, waitForDeferred as wFD
try:
from crypt import crypt
except ImportError:
crypt = None
try:
from twisted.cred.pamauth import callIntoPAM
except ImportError:
pamauth = None
else:
from twisted.cred import pamauth
class ITestable(components.Interface):
pass
class TestAvatar:
def __init__(self, name):
self.name = name
self.loggedIn = False
self.loggedOut = False
def login(self):
assert not self.loggedIn
self.loggedIn = True
def logout(self):
self.loggedOut = True
class Testable(components.Adapter):
interface.implements(ITestable)
# components.Interface(TestAvatar).adaptWith(Testable, ITestable)
components.registerAdapter(Testable, TestAvatar, ITestable)
class TestRealm:
interface.implements(portal.IRealm)
def __init__(self):
self.avatars = {}
def requestAvatar(self, avatarId, mind, *interfaces):
        if avatarId in self.avatars:
avatar = self.avatars[avatarId]
else:
avatar = TestAvatar(avatarId)
self.avatars[avatarId] = avatar
avatar.login()
return (interfaces[0], components.getAdapter(avatar, interfaces[0]),
avatar.logout)
class NewCredTest(unittest.TestCase):
def setUp(self):
r = self.realm = TestRealm()
p = self.portal = portal.Portal(r)
up = self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
up.addUser("bob", "hello")
p.registerChecker(up)
def testListCheckers(self):
expected = [credentials.IUsernamePassword, credentials.IUsernameHashedPassword]
got = self.portal.listCredentialsInterfaces()
expected.sort()
got.sort()
self.assertEquals(got, expected)
def testBasicLogin(self):
l = []; f = []
self.portal.login(credentials.UsernamePassword("bob", "hello"),
self, ITestable).addCallback(
l.append).addErrback(f.append)
if f:
raise f[0]
# print l[0].getBriefTraceback()
iface, impl, logout = l[0]
# whitebox
self.assertEquals(iface, ITestable)
self.failUnless(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.failUnless(impl.original.loggedIn)
self.failUnless(not impl.original.loggedOut)
logout()
self.failUnless(impl.original.loggedOut)
def testFailedLogin(self):
l = []
self.portal.login(credentials.UsernamePassword("bob", "h3llo"),
self, ITestable).addErrback(
lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
self.failUnless(l)
self.failUnlessEqual(error.UnauthorizedLogin, l[0])
def testFailedLoginName(self):
l = []
self.portal.login(credentials.UsernamePassword("jay", "hello"),
self, ITestable).addErrback(
lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
self.failUnless(l)
self.failUnlessEqual(error.UnauthorizedLogin, l[0])
class CramMD5CredentialsTestCase(unittest.TestCase):
def testIdempotentChallenge(self):
c = credentials.CramMD5Credentials()
chal = c.getChallenge()
self.assertEquals(chal, c.getChallenge())
def testCheckPassword(self):
c = credentials.CramMD5Credentials()
chal = c.getChallenge()
c.response = hmac.HMAC('secret', chal).hexdigest()
self.failUnless(c.checkPassword('secret'))
def testWrongPassword(self):
c = credentials.CramMD5Credentials()
self.failIf(c.checkPassword('secret'))
class OnDiskDatabaseTestCase(unittest.TestCase):
users = [
('user1', 'pass1'),
('user2', 'pass2'),
('user3', 'pass3'),
]
def testUserLookup(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
for (u, p) in self.users:
self.failUnlessRaises(KeyError, db.getUser, u.upper())
self.assertEquals(db.getUser(u), (u, p))
def testCaseInSensitivity(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
for (u, p) in self.users:
self.assertEquals(db.getUser(u.upper()), (u, p))
def testRequestAvatarId(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
creds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEquals, [u for u, p in self.users])
return d
def testRequestAvatarId_hashed(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
creds = [credentials.UsernameHashedPassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEquals, [u for u, p in self.users])
return d
class HashedPasswordOnDiskDatabaseTestCase(unittest.TestCase):
users = [
('user1', 'pass1'),
('user2', 'pass2'),
('user3', 'pass3'),
]
def hash(self, u, p, s):
return crypt(p, s)
def setUp(self):
dbfile = self.mktemp()
self.db = checkers.FilePasswordDB(dbfile, hash=self.hash)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, crypt(p, u[:2])))
f.close()
r = TestRealm()
self.port = portal.Portal(r)
self.port.registerChecker(self.db)
def testGoodCredentials(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.db.requestAvatarId(c) for c in goodCreds])
d.addCallback(self.assertEquals, [u for u, p in self.users])
return d
def testGoodCredentials_login(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.port.login(c, None, ITestable)
for c in goodCreds])
d.addCallback(lambda x: [a.original.name for i, a, l in x])
d.addCallback(self.assertEquals, [u for u, p in self.users])
return d
def testBadCredentials(self):
badCreds = [credentials.UsernamePassword(u, 'wrong password')
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in badCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnauthorizedLogin)
return d
def testHashedCredentials(self):
hashedCreds = [credentials.UsernameHashedPassword(u, crypt(p, u[:2]))
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in hashedCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnhandledCredentials)
return d
def _assertFailures(self, failures, *expectedFailures):
for flag, failure in failures:
self.failUnlessEqual(flag, defer.FAILURE)
failure.trap(*expectedFailures)
return None
if crypt is None:
skip = "crypt module not available"
class PluggableAuthenticationModulesTest(unittest.TestCase):
def setUpClass(self):
self._oldCallIntoPAM = pamauth.callIntoPAM
pamauth.callIntoPAM = self.callIntoPAM
def tearDownClass(self):
pamauth.callIntoPAM = self._oldCallIntoPAM
def callIntoPAM(self, service, user, conv):
if service != 'Twisted':
raise error.UnauthorizedLogin('bad service: %s' % service)
if user != 'testuser':
raise error.UnauthorizedLogin('bad username: %s' % user)
questions = [
(1, "Password"),
(2, "Message w/ Input"),
(3, "Message w/o Input"),
]
replies = conv(questions)
if replies != [
("password", 0),
("entry", 0),
("", 0)
]:
raise error.UnauthorizedLogin('bad conversion: %s' % repr(replies))
return 1
def _makeConv(self, d):
def conv(questions):
return defer.succeed([(d[t], 0) for t, q in questions])
return conv
def testRequestAvatarId(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'password', 2:'entry', 3:''})
creds = credentials.PluggableAuthenticationModules('testuser',
conv)
d = db.requestAvatarId(creds)
d.addCallback(self.assertEquals, 'testuser')
return d
def testBadCredentials(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'', 2:'', 3:''})
creds = credentials.PluggableAuthenticationModules('testuser',
conv)
d = db.requestAvatarId(creds)
self.assertFailure(d, error.UnauthorizedLogin)
return d
def testBadUsername(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'password', 2:'entry', 3:''})
creds = credentials.PluggableAuthenticationModules('baduser',
conv)
d = db.requestAvatarId(creds)
self.assertFailure(d, error.UnauthorizedLogin)
return d
if not pamauth:
skip = "Can't run without PyPAM"
class CheckersMixin:
def testPositive(self):
for chk in self.getCheckers():
for (cred, avatarId) in self.getGoodCredentials():
r = wFD(chk.requestAvatarId(cred))
yield r
self.assertEquals(r.getResult(), avatarId)
testPositive = dG(testPositive)
def testNegative(self):
for chk in self.getCheckers():
for cred in self.getBadCredentials():
r = wFD(chk.requestAvatarId(cred))
yield r
self.assertRaises(error.UnauthorizedLogin, r.getResult)
# Work around deferredGenerator bug
yield None
testNegative = dG(testNegative)
class HashlessFilePasswordDBMixin:
credClass = credentials.UsernamePassword
diskHash = None
networkHash = staticmethod(lambda x: x)
_validCredentials = [
('user1', 'password1'),
('user2', 'password2'),
('user3', 'password3')]
def getGoodCredentials(self):
for u, p in self._validCredentials:
yield self.credClass(u, self.networkHash(p)), u
def getBadCredentials(self):
for u, p in [('user1', 'password3'),
('user2', 'password1'),
('bloof', 'blarf')]:
yield self.credClass(u, self.networkHash(p))
def getCheckers(self):
diskHash = self.diskHash or (lambda x: x)
hashCheck = self.diskHash and (lambda username, password, stored: self.diskHash(password))
for cache in True, False:
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('%s:%s\n' % (u, diskHash(p)))
fObj.close()
yield checkers.FilePasswordDB(fn, cache=cache, hash=hashCheck)
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('%s dingle dongle %s\n' % (diskHash(p), u))
fObj.close()
yield checkers.FilePasswordDB(fn, ' ', 3, 0, cache=cache, hash=hashCheck)
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('zip,zap,%s,zup,%s\n' % (u.title(), diskHash(p)))
fObj.close()
yield checkers.FilePasswordDB(fn, ',', 2, 4, False, cache=cache, hash=hashCheck)
class LocallyHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
diskHash = staticmethod(lambda x: x.encode('hex'))
class NetworkHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
networkHash = staticmethod(lambda x: x.encode('hex'))
class credClass(credentials.UsernameHashedPassword):
def checkPassword(self, password):
return self.hashed.decode('hex') == password
class HashlessFilePasswordDBCheckerTestCase(HashlessFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
class LocallyHashedFilePasswordDBCheckerTestCase(LocallyHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
class NetworkHashedFilePasswordDBCheckerTestCase(NetworkHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
| tquilian/exeNext | twisted/test/test_newcred.py | Python | gpl-2.0 | 13,850 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from contextlib import contextmanager
import re
from cartouche._portability import u
from .errors import CartoucheError
from .nodes import (Node, Raises, Except, Note, Warning, Returns, Arg, Yields,
Attribute, Usage, ensure_terminal_blank)
OPTIONAL_BULLET_PATTERN = u(r'(?:[\*\+\-\•\‣\⁃]\s+)?')
ARGS_PATTERN = u(r'(\*{0,2}\w+)(\s+\(([\.\w]+)\))?\s*:\s*(.*)')
ATTRIBUTES_PATTERN = u(r'(\*{0,2}\w+)(\s+\(([\.\w]+)\))?\s*:\s*(.*)')
RAISES_PATTERN = u(r'([\w\.]+)\s*:\s*(.*)')
ARGS_REGEX = re.compile(ARGS_PATTERN)
ATTRIBUTES_REGEX = re.compile(ATTRIBUTES_PATTERN)
RAISES_REGEX = re.compile(RAISES_PATTERN)
class CartoucheSyntaxError(CartoucheError):
pass
def parse_cartouche_text(lines):
'''Parse text in cartouche format and return a reStructuredText equivalent
Args:
lines: A sequence of strings representing the lines of a single
docstring as read from the source by Sphinx. This string should be
in a format that can be parsed by cartouche.
Returns:
A list of lines containing the transformed docstring as
reStructuredText as produced by cartouche.
Raises:
RuntimeError: If the docstring cannot be parsed.
'''
indent_lines = unindent(lines)
indent_lines = pad_blank_lines(indent_lines)
indent_lines = first_paragraph_indent(indent_lines)
indent_paragraphs = gather_lines(indent_lines)
parse_tree = group_paragraphs(indent_paragraphs)
syntax_tree = extract_structure(parse_tree)
result = syntax_tree.render_rst()
ensure_terminal_blank(result)
return result
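# Worked example (editorial): a docstring written in cartouche form such as
#
#     Args:
#         x: The first operand.
#     Returns:
#         The square of x.
#
# comes back from parse_cartouche_text() as reStructuredText field lists,
# roughly ':param x: The first operand.' / ':returns: The square of x.'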
def unindent(lines):
'''Convert an iterable of indented lines into a sequence of tuples.
The first element of each tuple is the indent in number of characters, and
the second element is the unindented string.
Args:
lines: A sequence of strings representing the lines of text in a docstring.
Returns:
A list of tuples where each tuple corresponds to one line of the input
list. Each tuple has two entries - the first is an integer giving the
size of the indent in characters, the second is the unindented text.
'''
unindented_lines = []
for line in lines:
unindented_line = line.lstrip()
indent = len(line) - len(unindented_line)
unindented_lines.append((indent, unindented_line))
return unindented_lines
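# Example (editorial): unindent() pairs each line with its indent size.
#
#   >>> unindent(['Args:', '    x: value', ''])
#   [(0, 'Args:'), (4, 'x: value'), (0, '')]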
def pad_blank_lines(indent_texts):
'''Give blank (empty) lines the same indent level as the preceding line.
Args:
indent_texts: An iterable of tuples each containing an integer in the
first element and a string in the second element.
Returns:
A list of tuples each containing an integer in the first element and a
string in the second element.
'''
current_indent = 0
result = []
for indent, text in indent_texts:
if len(text) > 0:
current_indent = indent
result.append((current_indent, text))
return result
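# Example (editorial): a blank line inherits the indent of its predecessor,
# so a paragraph break inside an indented block stays within that block.
#
#   >>> pad_blank_lines([(4, 'x: value'), (0, ''), (4, 'y: value')])
#   [(4, 'x: value'), (4, ''), (4, 'y: value')]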
def extract_structure(parse_tree):
'''Create an Abstract Syntax Tree representing the semantics of a parse tree.
Args:
        parse_tree: The root Node of the parse tree, as produced by
            group_paragraphs().
Returns:
A Node with is the result of an Abstract Syntax Tree representing the
docstring.
Raises:
CartoucheError: In the event that the parse tree cannot be understood.
'''
return convert_node(parse_tree)
def convert_node(node):
if node.indent == 0 and len(node.lines) == 0:
return convert_children(node)
if node.lines[0].startswith('Args:'):
return convert_args(node)
if node.lines[0].startswith('Returns:'):
return convert_returns(node)
if node.lines[0].startswith('Yields:'):
return convert_yields(node)
if node.lines[0].startswith('Raises:'):
return convert_raises(node)
if node.lines[0].startswith('Note:'):
return convert_note(node)
if node.lines[0].startswith('Warning:'):
return convert_warning(node)
if node.lines[0].startswith('Attributes:'):
return convert_attributes(node)
if node.lines[0].startswith('Usage:'):
return convert_usage(node)
result = convert_children(node)
result.lines = node.lines
result.indent = node.indent
return result
def convert_children(node):
converted_children = [convert_node(child) for child in node.children]
result = Node()
result.children = converted_children
return result
def append_child_to_args_group_node(child, group_node, indent):
arg = None
non_empty_lines = (line for line in child.lines if line)
for line in non_empty_lines:
m = ARGS_REGEX.match(line)
if m is None:
raise CartoucheSyntaxError('Cartouche: Invalid argument syntax "{line}" for Args block'.format(line=line))
param_name = m.group(1)
param_type = m.group(3)
param_text = m.group(4)
arg = Arg(indent, param_name)
group_node.children.append(arg)
arg.type = param_type
if param_text is not None:
arg.children.append(Node(indent, [param_text], arg))
if arg is not None:
last_child = arg.children[-1] if len(arg.children) != 0 else arg
for grandchild in child.children:
last_child.children.append(grandchild)
def append_child_to_attributes_group_node(child, group_node, indent):
attribute = None
non_empty_lines = (line for line in child.lines if line)
for line in non_empty_lines:
m = ATTRIBUTES_REGEX.match(line)
if m is None:
raise CartoucheSyntaxError('Cartouche: Invalid attribute syntax "{line}" for Attributes block'.format(line=line))
attribute_name = m.group(1)
attribute_type = m.group(3)
attribute_text = m.group(4)
attribute = Attribute(indent, attribute_name)
group_node.children.append(attribute)
attribute.type = attribute_type
if attribute_text is not None:
attribute.children.append(Node(indent, [attribute_text], attribute))
if attribute is not None:
last_child = attribute.children[-1] if len(attribute.children) != 0 else attribute
for grandchild in child.children:
last_child.children.append(grandchild)
def convert_args(node):
assert node.lines[0].startswith('Args:')
group_node = Node()
for child in node.children:
append_child_to_args_group_node(child, group_node, node.indent)
return group_node
def convert_returns(node):
assert node.lines[0].startswith('Returns:')
returns = Returns(node.indent)
returns.line = node.lines[0][8:].strip()
returns.children = node.children
return returns
def convert_yields(node):
assert node.lines[0].startswith('Yields:')
    yields = Yields(node.indent)
    yields.line = node.lines[0][7:].strip()  # len('Yields:') == 7, not 8
    yields.children = node.children
    return yields
def convert_note(node):
assert node.lines[0].startswith('Note:')
note = Note(node.indent)
note.line = node.lines[0][5:].strip()
note.children = node.children
return note
def convert_warning(node):
assert node.lines[0].startswith('Warning:')
warning = Warning(node.indent)
warning.line = node.lines[0][8:].strip()
warning.children = node.children
return warning
def convert_raises(node):
assert node.lines[0].startswith('Raises:')
group_node = Raises(node.indent)
for child in node.children:
append_child_to_raise_node(child, group_node)
return group_node
def convert_attributes(node):
assert node.lines[0].startswith('Attributes:')
group_node = Node()
for child in node.children:
append_child_to_attributes_group_node(child, group_node, node.indent)
return group_node
def convert_usage(node):
assert node.lines[0].startswith('Usage:')
usage = Usage(node.indent)
usage.children = node.children
return usage
def parse_exception(line):
'''Parse the first line of a Cartouche exception description.
Args:
line (str): A single line Cartouche exception description.
Returns:
        A 2-tuple containing the first line of the description and the
        exception type.
'''
m = RAISES_REGEX.match(line)
if m is None:
raise CartoucheSyntaxError('Cartouche: Invalid argument syntax "{line}" for Raises block'.format(line=line))
return m.group(2), m.group(1)
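# Example (editorial): note the returned tuple is (description, type),
# matching the (m.group(2), m.group(1)) order above.
#
#   >>> parse_exception('ValueError: If x is negative.')
#   ('If x is negative.', 'ValueError')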
def append_child_to_raise_node(child, group_node):
exception = None
non_empty_lines = (line for line in child.lines if line)
for line in non_empty_lines:
exception_text, exception_type = parse_exception(line)
exception = Except(child.indent, exception_type)
group_node.children.append(exception) # TODO: Could use parent here.
if exception_text is not None:
            exception.children.append(Node(child.indent,
                                           [exception_text], exception))
if exception is not None:
last_child = exception.children[-1] if len(exception.children) != 0 else exception
for grandchild in child.children:
last_child.children.append(grandchild)
def group_paragraphs(indent_paragraphs):
'''
Group paragraphs so that more indented paragraphs become children of less
indented paragraphs.
'''
    # The tree consists of Node objects, each holding an indent level, a list
    # of lines, and a list of child Nodes.
root = Node(0, [], None)
current_node = root
previous_indent = -1
for indent, lines in indent_paragraphs:
if indent > previous_indent:
current_node = create_child_node(current_node, indent, lines)
elif indent == previous_indent:
current_node = create_sibling_node(current_node, indent, lines)
elif indent < previous_indent:
current_node = create_uncle_node(current_node, indent, lines)
previous_indent = indent
return root
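# Example (editorial sketch): deeper-indented paragraphs hang off shallower
# ones, so group_paragraphs([(0, ['Args:']), (4, ['x: value'])]) yields a
# root Node whose child holds ['Args:'] and whose grandchild holds
# ['x: value'].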
def create_sibling_node(current_node, indent, lines):
sibling = Node(indent, lines, current_node.parent)
current_node.parent.add_child(sibling)
current_node = sibling
return current_node
def create_child_node(current_node, indent, lines):
child = Node(indent, lines, current_node)
current_node.add_child(child)
current_node = child
return current_node
def create_uncle_node(current_node, indent, lines):
ancestor = current_node
while ancestor.indent >= indent:
if ancestor.parent is None:
break
ancestor = ancestor.parent
uncle = Node(indent, lines, ancestor)
ancestor.add_child(uncle)
current_node = uncle
return current_node
def gather_lines(indent_lines):
'''Split the list of (int, str) tuples into a list of (int, [str]) tuples
to group the lines into paragraphs of consistent indent.
'''
return remove_empty_paragraphs(split_separated_lines(gather_lines_by_indent(indent_lines)))
def gather_lines_by_indent(indent_lines):
result = []
previous_indent = -1
for indent, line in indent_lines:
if indent != previous_indent:
paragraph = (indent, [])
result.append(paragraph)
else:
paragraph = result[-1]
paragraph[1].append(line)
previous_indent = indent
return result
def split_separated_lines(indent_paragraphs):
result = []
for indent, paragraph in indent_paragraphs:
result.append((indent, []))
if len(paragraph) > 0:
result[-1][1].append(paragraph[0])
if len(paragraph) > 2:
for line in paragraph[1: -1]:
result[-1][1].append(line)
if len(line) == 0:
result.append((indent, []))
if len(paragraph) > 1:
result[-1][1].append(paragraph[-1])
return result
def remove_empty_paragraphs(indent_paragraphs):
return [(indent, paragraph) for indent, paragraph in indent_paragraphs if len(paragraph)]
def first_paragraph_indent(indent_texts):
'''Fix the indentation on the first paragraph.
This occurs because the first line of a multi-line docstring following the
opening quote usually has no indent.
Args:
indent_texts: The lines of the docstring as an iterable over 2-tuples
each containing an integer indent level as the first element and
the text as the second element.
    Returns:
A list of 2-tuples, each containing an integer indent level as the
first element and the text as the second element.
'''
opening_indent = determine_opening_indent(indent_texts)
result = []
input = iter(indent_texts)
for indent, text in input:
if indent == 0:
result.append((opening_indent, text))
else:
result.append((indent, text))
break
for indent, text in input:
result.append((indent, text))
return result
def determine_opening_indent(indent_texts):
'''Determine the opening indent level for a docstring.
    The opening indent level is the first non-zero indent level of a
    non-empty line in the docstring.
Args:
indent_texts: The lines of the docstring as an iterable over 2-tuples
each containing an integer indent level as the first element and
the text as the second element.
Returns:
The opening indent level as an integer.
'''
num_lines = len(indent_texts)
if num_lines < 1:
return 0
assert num_lines >= 1
first_line_indent = indent_texts[0][0]
if num_lines == 1:
return first_line_indent
assert num_lines >= 2
second_line_indent = indent_texts[1][0]
second_line_text = indent_texts[1][1]
if len(second_line_text) == 0:
return first_line_indent
return second_line_indent
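# Example (editorial): the summary line after the opening quotes carries no
# indent, so the second line is the better witness when it is non-empty.
#
#   >>> determine_opening_indent([(0, 'Summary.'), (4, 'Args:')])
#   4
#   >>> determine_opening_indent([(0, 'Summary.'), (4, '')])
#   0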
#noinspection PyUnusedLocal
def rewrite_autodoc(app, what, name, obj, options, lines):
'''Convert lines from Cartouche to Sphinx format.
The function to be called by the Sphinx autodoc extension when autodoc
has read and processed a docstring. This function modified its
``lines`` argument *in place* replacing Cartouche syntax input into
Sphinx reStructuredText output.
Args:
        app: The Sphinx application object.
what: The type of object which the docstring belongs to. One of
'module', 'class', 'exception', 'function', 'method', 'attribute'
name: The fully qualified name of the object.
obj: The object itself.
options: The options given to the directive. An object with attributes
``inherited_members``, ``undoc_members``, ``show_inheritance`` and
``noindex`` that are ``True`` if the flag option of the same name
was given to the auto directive.
lines: The lines of the docstring. Will be modified *in place*.
Raises:
CartoucheSyntaxError: If the docstring is malformed.
'''
try:
lines[:] = parse_cartouche_text(lines)
except CartoucheSyntaxError as syntax_error:
args = syntax_error.args
arg0 = args[0] if args else ''
arg0 += " in docstring for {what} {name} :".format(what=what, name=name)
arg0 += "\n=== BEGIN DOCSTRING ===\n{lines}\n=== END DOCSTRING ===\n".format(lines='\n'.join(lines))
#noinspection PyPropertyAccess
syntax_error.args = (arg0,) + args[1:]
raise
def accept_bulleted_args():
'''Further use of the parser will accept bulleted lists for Args.'''
global ARGS_REGEX
ARGS_REGEX = re.compile(OPTIONAL_BULLET_PATTERN + ARGS_PATTERN)
def reject_bulleted_args():
'''Further use of the parser will reject bulleted lists for Args.'''
global ARGS_REGEX
ARGS_REGEX = re.compile(ARGS_PATTERN)
def accept_bulleted_raises():
'''Further use of the parser will accept bulleted lists for Raises.'''
global RAISES_REGEX
RAISES_REGEX = re.compile(OPTIONAL_BULLET_PATTERN + RAISES_PATTERN)
def reject_bulleted_raises():
'''Further use of the parser will reject bulleted lists for Raises.'''
global RAISES_REGEX
RAISES_REGEX = re.compile(RAISES_PATTERN)
@contextmanager
def bulleted_args():
'''A context manager within the scope of which bulleted Args will be accepted.'''
global ARGS_REGEX
previous_args_regex = ARGS_REGEX
accept_bulleted_args()
yield
ARGS_REGEX = previous_args_regex
@contextmanager
def bulleted_raises():
'''A context manager within the scope of which bulleted Raises will be accepted.'''
global RAISES_REGEX
previous_raises_regex = RAISES_REGEX
accept_bulleted_raises()
yield
RAISES_REGEX = previous_raises_regex
def builder_inited(app):
if app.config.cartouche_accept_bulleted_args:
accept_bulleted_args()
if app.config.cartouche_accept_bulleted_raises:
accept_bulleted_raises()
| rob-smallshire/cartouche | cartouche/parser.py | Python | bsd-3-clause | 17,022 |
import socket
import IPython
import datetime
import logging
import random
import threading
import struct
import time
import sys
import os
# logging
formatter="%(asctime)s %(levelname)-12s %(message)s"
# to file
log_filename="log_"+datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d.%H.%M.%S.%f')
logging.basicConfig( filename=log_filename, filemode="a", format=formatter, level=logging.INFO);
# to console
formatter = logging.Formatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
ip_addr = sys.argv[1]
server_address = (ip_addr, 5555)
print >>sys.stderr, 'connecting to %s port %s' % server_address
sock.connect(server_address)
def data_recv():
    while True:
        for i in range(1, 10):
            time.sleep(0.2)
            data = sock.recv(8192)
            if data:
                logging.info("recv: " + (" ".join(("%02x" % struct.unpack('B', n)) for n in data)))
def gen_random_data(a, b):
    # 'n' avoids shadowing the builtin len().
    n = random.randint(a, b)
    rand_list = []
    for i in range(n):
        rand_list += [random.randint(0, 255)]
    return rand_list
def random_package():
    # 'pkg_len' avoids shadowing the builtin len().
    pkg_len = random.randint(100, 255)
    c1 = random.randint(0, 255)
    c2 = random.randint(0, 255)
    rand_list = [0x1b, c1, c2, pkg_len]
    for i in range(pkg_len):
        rand_list += [random.randint(1, 255)]
    return rand_list
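# Shape of a generated frame (editorial note): [0x1b, c1, c2, n, b1, ..., bn]
# -- a 0x1b marker, two random command bytes, a length byte n in [100, 255],
# then n random non-zero payload bytes.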
def print_hex(data):
logging.info(" ".join(("%02x" % n) for n in data))
def hex2bin(data_hex):
data_bin=''
for d in data_hex:
data_bin = data_bin + struct.pack('B', d)
return data_bin
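# Example (editorial): hex2bin() packs a list of byte values into the raw
# byte string that sock.sendall() expects under Python 2.
#
#   >>> hex2bin([0x1b, 0x01, 0xff])
#   '\x1b\x01\xff'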
def send_cmd(cmd):
sock.sendall(cmd)
logging.info("send: ")
print_hex(cmd)
logging.info("")
def main():
t = threading.Thread(target=data_recv)
t.setDaemon(True)
t.start()
try:
IPython.embed()
finally:
print >>sys.stderr, 'closing socket'
sock.close()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
        if sock is not None:
sock.close()
| solvery/lang-features | python/case/case.dkm_api_sender_tcp_1/dkm_api_sender_tcp_interact.py | Python | gpl-2.0 | 2,422 |
import os
from datetime import date
from unittest import skipUnless
from django.apps import apps
from django.conf import settings
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
from .models import TestModel
class HTTPSitemapTests(SitemapTestsBase):
use_sitemap_err_msg = (
'To use sitemaps, either enable the sites framework or pass a '
'Site/RequestSite object in your view.'
)
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_not_callable(self):
"""A sitemap may not be callable."""
response = self.client.get('/simple-not-callable/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_paged_sitemap(self):
"""A sitemap may have multiple pages."""
response = self.client.get('/simple-paged/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>{0}/simple/sitemap-simple.xml</loc></sitemap><sitemap><loc>{0}/simple/sitemap-simple.xml?p=2</loc></sitemap>
</sitemapindex>
""".format(self.base_url)
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_no_section(self):
response = self.client.get('/simple/sitemap-simple2.xml')
self.assertEqual(str(response.context['exception']), "No sitemap available for section: 'simple2'")
self.assertEqual(response.status_code, 404)
def test_empty_page(self):
response = self.client.get('/simple/sitemap-simple.xml?p=0')
self.assertEqual(str(response.context['exception']), 'Page 0 empty')
self.assertEqual(response.status_code, 404)
def test_page_not_int(self):
response = self.client.get('/simple/sitemap-simple.xml?p=test')
self.assertEqual(str(response.context['exception']), "No page 'test'")
self.assertEqual(response.status_code, 404)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_last_modified(self):
"Last-Modified header is set correctly"
response = self.client.get('/lastmod/sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')
def test_sitemap_last_modified_date(self):
"""
        The Last-Modified header should support dates (without time).
"""
response = self.client.get('/lastmod/date-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')
def test_sitemap_last_modified_tz(self):
"""
The Last-Modified header should be converted from timezone aware dates
to GMT.
"""
response = self.client.get('/lastmod/tz-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')
def test_sitemap_last_modified_missing(self):
"Last-Modified header is missing when sitemap has no lastmod"
response = self.client.get('/generic/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemap_last_modified_mixed(self):
"Last-Modified header is omitted when lastmod not on all items"
response = self.client.get('/lastmod-mixed/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-ascending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-descending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_ascending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/ascending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
def test_sitemaps_lastmod_descending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/descending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Priorities haven't been rendered in localized format.
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_requestsite_sitemap(self):
# Hitting the flatpages sitemap without the sites framework installed
# doesn't raise an exception.
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode(), expected_content)
@skipUnless(apps.is_installed('django.contrib.sites'),
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg):
Sitemap().get_urls()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
        Sitemap.get_urls if Site objects exist, but the sites framework is not
actually installed.
"""
with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg):
Sitemap().get_urls()
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
        Sitemap.get_urls() url result.
"""
test_sitemap = Sitemap()
test_sitemap.items = TestModel.objects.order_by('pk').all
def is_testmodel(url):
return isinstance(url['item'], TestModel)
item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
A cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
def test_empty_sitemap(self):
response = self.client.get('/empty/sitemap.xml')
self.assertEqual(response.status_code, 200)
@override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
def test_simple_i18nsitemap_index(self):
"A simple i18n sitemap index can be rendered"
response = self.client.get('/simple/i18n.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_without_entries(self):
response = self.client.get('/sitemap-without-entries/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
</urlset>"""
self.assertXMLEqual(response.content.decode(), expected_content)
| ifduyue/django | tests/sitemaps_tests/test_http.py | Python | bsd-3-clause | 12,772 |
"""Testcases for cssutils.css.CSSCharsetRule"""
__version__ = '$Id: test_csscharsetrule.py 1356 2008-07-13 17:29:09Z cthedot $'
import sys
import xml.dom
import basetest
from cssutils.prodparser import *
from cssutils.prodparser import ParseError, Done, Exhausted, NoMatch # not in __all__
class ProdTestCase(basetest.BaseTestCase):
def test_init(self):
"Prod.__init__(...)"
p = Prod('min', lambda t, v: t == 1 and v == 2)
self.assertEqual(str(p), 'min')
self.assertEqual(p.toStore, None)
self.assertEqual(p.optional, False)
p = Prod('optional', lambda t, v: True,
optional=True)
self.assertEqual(p.optional, True)
def test_initMatch(self):
"Prod.__init__(...match=...)"
p = Prod('min', lambda t, v: t == 1 and v == 2)
self.assertEqual(p.match(1, 2), True)
self.assertEqual(p.match(2, 2), False)
self.assertEqual(p.match(1, 1), False)
def test_initToSeq(self):
"Prod.__init__(...toSeq=...)"
# simply saves
p = Prod('all', lambda t, tokens: True,
toSeq=None)
self.assertEqual(p.toSeq([1, 2], None), (1, 2)) # simply saves
self.assertEqual(p.toSeq(['s1', 's2'], None), ('s1', 's2')) # simply saves
# saves callback(val)
p = Prod('all', lambda t, v: True,
toSeq=lambda t, tokens: (1 == t[0], 3 == t[1]))
self.assertEqual(p.toSeq([1, 3], None), (True, True))
self.assertEqual(p.toSeq([2, 4], None), (False, False))
def test_initToStore(self):
"Prod.__init__(...toStore=...)"
p = Prod('all', lambda t, v: True,
toStore='key')
# save as key
s = {}
p.toStore(s, 1)
self.assertEqual(s['key'], 1)
# append to key
s = {'key': []}
p.toStore(s, 1)
p.toStore(s, 2)
self.assertEqual(s['key'], [1, 2])
# callback
def doubleToStore(key):
def toStore(store, item):
store[key] = item * 2
return toStore
p = Prod('all', lambda t, v: True,
toStore=doubleToStore('key'))
s = {'key': []}
p.toStore(s, 1)
self.assertEqual(s['key'], 2)
def test_matches(self):
"Prod.matches(token)"
p1 = Prod('p1', lambda t, v: t == 1 and v == 2)
p2 = Prod('p2', lambda t, v: t == 1 and v == 2, optional=True)
self.assertEqual(p1.matches([1, 2, 0, 0]), True)
self.assertEqual(p2.matches([1, 2, 0, 0]), True)
self.assertEqual(p1.matches([0, 0, 0, 0]), False)
self.assertEqual(p2.matches([0, 0, 0, 0]), False)
class SequenceTestCase(basetest.BaseTestCase):
def test_init(self):
"Sequence.__init__()"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2)
seq = Sequence(p1, p1)
self.assertEqual(1, seq._min)
self.assertEqual(1, seq._max)
def test_initminmax(self):
"Sequence.__init__(...minmax=...)"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2)
s = Sequence(p1, p2, minmax=lambda: (2, 3))
self.assertEqual(2, s._min)
self.assertEqual(3, s._max)
s = Sequence(p1, p2, minmax=lambda: (0, None))
self.assertEqual(0, s._min)
try:
            # py2.6+/py3
            m = sys.maxsize
        except AttributeError:
            # py<2.6
            m = sys.maxint
self.assertEqual(m, s._max)
def test_optional(self):
"Sequence.optional"
p1 = Prod('p1', lambda t, v: t == 1)
s = Sequence(p1, minmax=lambda: (1, 3))
self.assertEqual(False, s.optional)
s = Sequence(p1, minmax=lambda: (0, 3))
self.assertEqual(True, s.optional)
s = Sequence(p1, minmax=lambda: (0, None))
self.assertEqual(True, s.optional)
def test_reset(self):
"Sequence.reset()"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2)
seq = Sequence(p1, p2)
t1 = (1, 0, 0, 0)
t2 = (2, 0, 0, 0)
self.assertEqual(p1, seq.nextProd(t1))
self.assertEqual(p2, seq.nextProd(t2))
self.assertRaises(Exhausted, seq.nextProd, t1)
seq.reset()
self.assertEqual(p1, seq.nextProd(t1))
def test_matches(self):
"Sequence.matches()"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2, optional=True)
t1 = (1, 0, 0, 0)
t2 = (2, 0, 0, 0)
t3 = (3, 0, 0, 0)
s = Sequence(p1, p2)
self.assertEqual(True, s.matches(t1))
self.assertEqual(False, s.matches(t2))
s = Sequence(p2, p1)
self.assertEqual(True, s.matches(t1))
self.assertEqual(True, s.matches(t2))
s = Sequence(Choice(p1, p2))
self.assertEqual(True, s.matches(t1))
self.assertEqual(True, s.matches(t2))
self.assertEqual(False, s.matches(t3))
def test_nextProd(self):
"Sequence.nextProd()"
p1 = Prod('p1', lambda t, v: t == 1, optional=True)
p2 = Prod('p2', lambda t, v: t == 2)
t1 = (1, 0, 0, 0)
t2 = (2, 0, 0, 0)
tests = {
# seq: list of list of (token, prod or error msg)
(p1, ): ([(t1, p1)],
[(t2, 'Extra token')], # as p1 optional
[(t1, p1), (t1, u'Extra token')],
[(t1, p1), (t2, u'Extra token')]
),
(p2, ): ([(t2, p2)],
[(t2, p2), (t2, u'Extra token')],
[(t2, p2), (t1, u'Extra token')],
[(t1, 'Missing token for production p2')]
),
(p1, p2): ([(t1, p1), (t2, p2)],
[(t1, p1), (t1, u'Missing token for production p2')]
)
}
for seqitems, results in tests.items():
for result in results:
seq = Sequence(*seqitems)
for t, p in result:
if isinstance(p, basestring):
self.assertRaisesMsg(ParseError, p, seq.nextProd, t)
else:
self.assertEqual(p, seq.nextProd(t))
tests = {
# seq: list of list of (token, prod or error msg)
# as p1 optional!
(p1, p1): ([(t1, p1)],
[(t1, p1), (t1, p1)],
[(t1, p1), (t1, p1)],
[(t1, p1), (t1, p1), (t1, p1)],
[(t1, p1), (t1, p1), (t1, p1), (t1, p1)],
[(t1, p1), (t1, p1), (t1, p1), (t1, p1), (t1, u'Extra token')],
),
(p1, ): ([(t1, p1)],
[(t2, 'Extra token')],
[(t1, p1), (t1, p1)],
[(t1, p1), (t2, 'Extra token')],
[(t1, p1), (t1, p1), (t1, u'Extra token')],
[(t1, p1), (t1, p1), (t2, u'Extra token')]
),
# as p2 NOT optional
(p2, ): ([(t2, p2)],
[(t1, 'Missing token for production p2')],
[(t2, p2), (t2, p2)],
[(t2, p2), (t1, u'No match for (1, 0, 0, 0) in Sequence(p2)')],
[(t2, p2), (t2, p2), (t2, u'Extra token')],
[(t2, p2), (t2, p2), (t1, u'Extra token')]
),
(p1, p2): ([(t1, p1), (t1, u'Missing token for production p2')],
[(t2, p2), (t2, p2)],
[(t2, p2), (t1, p1), (t2, p2)],
[(t1, p1), (t2, p2), (t2, p2)],
[(t1, p1), (t2, p2), (t1, p1), (t2, p2)],
[(t2, p2), (t2, p2), (t2, u'Extra token')],
[(t2, p2), (t1, p1), (t2, p2), (t1, 'Extra token')],
[(t2, p2), (t1, p1), (t2, p2), (t2, 'Extra token')],
[(t1, p1), (t2, p2), (t2, p2), (t1, 'Extra token')],
[(t1, p1), (t2, p2), (t2, p2), (t2, 'Extra token')],
[(t1, p1), (t2, p2), (t1, p1), (t2, p2), (t1, 'Extra token')],
[(t1, p1), (t2, p2), (t1, p1), (t2, p2), (t2, 'Extra token')],
)
}
for seqitems, results in tests.items():
for result in results:
seq = Sequence(minmax=lambda: (1,2), *seqitems)
for t, p in result:
if isinstance(p, basestring):
self.assertRaisesMsg(ParseError, p, seq.nextProd, t)
else:
self.assertEqual(p, seq.nextProd(t))
class ChoiceTestCase(basetest.BaseTestCase):
def test_init(self):
"Choice.__init__()"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2)
t0 = (0,0,0,0)
t1 = (1,0,0,0)
t2 = (2,0,0,0)
ch = Choice(p1, p2)
self.assertRaisesMsg(ParseError, u'No match for (0, 0, 0, 0) in Choice(p1, p2)', ch.nextProd, t0)
self.assertEqual(p1, ch.nextProd(t1))
self.assertRaisesMsg(Exhausted, u'Extra token', ch.nextProd, t1)
ch = Choice(p1, p2)
self.assertEqual(p2, ch.nextProd(t2))
self.assertRaisesMsg(Exhausted, u'Extra token', ch.nextProd, t2)
ch = Choice(p2, p1)
self.assertRaisesMsg(ParseError, 'No match for (0, 0, 0, 0) in Choice(p2, p1)', ch.nextProd, t0)
self.assertEqual(p1, ch.nextProd(t1))
self.assertRaisesMsg(Exhausted, u'Extra token', ch.nextProd, t1)
ch = Choice(p2, p1)
self.assertEqual(p2, ch.nextProd(t2))
self.assertRaisesMsg(Exhausted, u'Extra token', ch.nextProd, t2)
def test_matches(self):
"Choice.matches()"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2, optional=True)
t1 = (1, 0, 0, 0)
t2 = (2, 0, 0, 0)
t3 = (3, 0, 0, 0)
c = Choice(p1, p2)
self.assertEqual(True, c.matches(t1))
self.assertEqual(True, c.matches(t2))
self.assertEqual(False, c.matches(t3))
c = Choice(Sequence(p1), Sequence(p2))
self.assertEqual(True, c.matches(t1))
self.assertEqual(True, c.matches(t2))
self.assertEqual(False, c.matches(t3))
def test_nested(self):
"Choice with nested Sequence"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2)
s1 = Sequence(p1, p1)
s2 = Sequence(p2, p2)
t0 = (0,0,0,0)
t1 = (1,0,0,0)
t2 = (2,0,0,0)
ch = Choice(s1, s2)
self.assertRaisesMsg(ParseError, u'No match for (0, 0, 0, 0) in Choice(Sequence(p1, p1), Sequence(p2, p2))', ch.nextProd, t0)
self.assertEqual(s1, ch.nextProd(t1))
self.assertRaisesMsg(Exhausted, u'Extra token', ch.nextProd, t1)
ch = Choice(s1, s2)
self.assertEqual(s2, ch.nextProd(t2))
self.assertRaisesMsg(Exhausted, u'Extra token', ch.nextProd, t1)
def test_reset(self):
"Choice.reset()"
p1 = Prod('p1', lambda t, v: t == 1)
p2 = Prod('p2', lambda t, v: t == 2)
t1 = (1,0,0,0)
t2 = (2,0,0,0)
ch = Choice(p1, p2)
self.assertEqual(p1, ch.nextProd(t1))
self.assertRaises(Exhausted, ch.nextProd, t1)
ch.reset()
self.assertEqual(p2, ch.nextProd(t2))
class ProdParserTestCase(basetest.BaseTestCase):
def setUp(self):
pass
def test_parse_keepS(self):
"ProdParser.parse(keepS)"
p = ProdParser()
# text, name, productions, store=None
prods = lambda: Sequence(PreDef.char(';', u';'),
PreDef.char(':', u':')
)
w, seq, store, unused = p.parse('; :', 'test', prods(),
keepS=True)
self.assertTrue(w)
self.assertEqual(3, len(seq))
w, seq, store, unused = p.parse('; :', 'test', prods(),
keepS=False)
self.assertTrue(w)
self.assertEqual(2, len(seq))
def test_combi(self):
"ProdParser.parse() 2"
p1 = Prod('p1', lambda t, v: v == '1')
p2 = Prod('p2', lambda t, v: v == '2')
p3 = Prod('p3', lambda t, v: v == '3')
tests = {'1 2': True,
'1 2 1 2': True,
'3': True,
#'': 'No match in Choice(Sequence(p1, p2), p3)',
'1': 'Missing token for production p2',
'1 2 1': 'Missing token for production p2',
'1 2 1 2 x': "No match: ('IDENT', 'x', 1, 9)",
'1 2 1 2 1': "No match: ('NUMBER', '1', 1, 9)",
'3 x': "No match: ('IDENT', 'x', 1, 3)",
'3 3': "No match: ('NUMBER', '3', 1, 3)",
}
for text, exp in tests.items():
prods = Choice(Sequence(p1, p2, minmax=lambda: (1,2)),
p3)
if exp is True:
wellformed, seq, store, unused = ProdParser().parse(text, 'T', prods)
self.assertEqual(wellformed, exp)
else:
self.assertRaisesMsg(xml.dom.SyntaxErr, u'T: %s' % exp,
ProdParser().parse, text, 'T', prods)
tests = {'1 3': True,
'1 1 3': True,
'2 3': True,
'1': 'Missing token for production p3',
'1 1': 'Missing token for production p3',
'1 3 3': "No match: ('NUMBER', '3', 1, 5)",
'1 1 3 3': "No match: ('NUMBER', '3', 1, 7)",
'2 3 3': "No match: ('NUMBER', '3', 1, 5)",
'2': 'Missing token for production p3',
'3': "Missing token for production Choice(Sequence(p1), p2): ('NUMBER', '3', 1, 1)",
}
for text, exp in tests.items():
prods = Sequence(Choice(Sequence(p1, minmax=lambda: (1,2)),
p2),
p3)
if exp is True:
wellformed, seq, store, unused = ProdParser().parse(text, 'T', prods)
self.assertEqual(wellformed, exp)
else:
self.assertRaisesMsg(xml.dom.SyntaxErr, u'T: %s' % exp,
ProdParser().parse, text, 'T', prods)
if __name__ == '__main__':
import unittest
unittest.main()
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/cssutils/tests/test_prodparser.py | Python | agpl-3.0 | 15,466 |
import pytest
import six
from mock import call, patch
from tests import utils
from week_parser.base import parse_row, parse_week, populate_extra_data
from week_parser.main import PrettyPrinter
def test_populate_extra_data_no_days():
"""
If we haven't found any days data, there is not extra data to add
"""
week_data = {}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {}
def test_populate_extra_data_square_day():
"""
If we have found a 'square' day, the description and square value is added
"""
value = 7
week_data = {'mon': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'mon': {
'value': value,
'square': value ** 2,
'description': '{} {}'.format(description, value ** 2)
}
}
def test_populate_extra_data_double_day():
"""
If we have found a 'double' day, the description and double value is added
"""
value = 7
week_data = {'thu': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'thu': {
'value': value,
'double': value * 2,
'description': '{} {}'.format(description, value * 2)
}
}
def test_parse_row_single_day():
"""
If the input row contains a single day, it is outputted
"""
row = {'mon': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'mon': {'day': 'mon', 'value': 3}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_day_range():
"""
If the input row contains a day range, it is outputted
"""
row = {'mon-wed': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 3},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_extra_columns():
"""
If the input row contains any extra columns, they are skipped
"""
row = {'wed': '2', 'description': '__DESCRIPTION__',
'__FOO__': '__BAR__', '__ANYTHING__': '__ELSE__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'wed': {'day': 'wed', 'value': 2}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_not_int_value():
"""
If the day value is not an integer, we get a ValueError
"""
row = {'mon': '__NOT_A_NUMBER__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
with pytest.raises(ValueError) as exc:
parse_row(row)
assert mock_populate.call_count == 0
assert str(exc.value) == (
"invalid literal for int() with base 10: '__NOT_A_NUMBER__'")
def test_parse_row_invalid_day_range():
"""
If the input row contains an invalid day range, we skip it
"""
row = {'foo-bar': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row():
"""
An input row may contain any combination of day ranges
"""
row = {'mon-tue': '3', 'wed-thu': '2', 'fri': '1',
'__SOME__': '__DATA__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 2},
'thu': {'day': 'thu', 'value': 2},
'fri': {'day': 'fri', 'value': 1},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_week_empty_file():
"""
We can process an empty file
"""
filename = 'anything.csv'
with utils.mock_open(file_content='') as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_week:
result = parse_week(filename)
assert result == []
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_week.call_count == 0
def test_parse_week_valid_file():
"""
We can process a file with valid content
"""
filename = 'anything.csv'
csv_data = ('mon,tue,some_column1,wed,thu,fri,description\n'
'1,5,data,2,3,3,first_desc\n')
expected_row = {'mon': '1', 'tue': '5', 'wed': '2', 'thu': '3', 'fri': '3',
'description': 'first_desc', 'some_column1': 'data'}
with utils.mock_open(file_content=csv_data) as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_row:
mock_parse_row.return_value = {'mon': {'day': 'mon'}}
result = parse_week(filename)
assert result == [{'day': 'mon'}]
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_row.call_args_list == [call(expected_row)]
def test_pprint_bytes(capsys):
printer = PrettyPrinter()
printer.pprint(six.b('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
def test_pprint_unicode(capsys):
printer = PrettyPrinter()
printer.pprint(six.u('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
| JoseKilo/week_parser | tests/unit/test_week_parser.py | Python | mit | 5,936 |
"""Tests for the Whois integration."""
| rohitranjan1991/home-assistant | tests/components/whois/__init__.py | Python | mit | 39 |
from PyQt4 import QtGui
from Orange.data import Table
from Orange.widgets import gui, widget
from Orange.widgets.settings import Setting
from Orange.preprocess.remove import Remove
class OWPurgeDomain(widget.OWWidget):
name = "Purge Domain"
description = "Remove redundant values and features from the data set. " \
"Sorts values."
icon = "icons/PurgeDomain.svg"
category = "Data"
keywords = ["data", "purge", "domain"]
inputs = [("Data", Table, "setData")]
outputs = [("Data", Table)]
removeValues = Setting(1)
removeAttributes = Setting(1)
removeClassAttribute = Setting(1)
removeClasses = Setting(1)
autoSend = Setting(False)
sortValues = Setting(True)
sortClasses = Setting(True)
want_main_area = False
resizing_enabled = False
def __init__(self, parent=None):
super().__init__(parent)
self.data = None
self.removedAttrs = "-"
self.reducedAttrs = "-"
self.resortedAttrs = "-"
self.removedClasses = "-"
self.reducedClasses = "-"
self.resortedClasses = "-"
boxAt = gui.widgetBox(self.controlArea, "Features")
gui.checkBox(boxAt, self, 'sortValues',
'Sort discrete feature values',
callback=self.optionsChanged)
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, "removeValues",
"Remove unused feature values",
callback=self.optionsChanged)
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, "removeAttributes",
"Remove constant features",
callback=self.optionsChanged)
boxAt = gui.widgetBox(self.controlArea, "Classes", addSpace=True)
gui.checkBox(boxAt, self, 'sortClasses',
'Sort discrete class variable values',
callback=self.optionsChanged)
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, "removeClasses",
"Remove unused class variable values",
callback=self.optionsChanged)
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, "removeClassAttribute",
"Remove constant class variables",
callback=self.optionsChanged)
box3 = gui.widgetBox(self.controlArea, 'Statistics', addSpace=True)
gui.label(box3, self, "Removed features: %(removedAttrs)s")
gui.label(box3, self, "Reduced features: %(reducedAttrs)s")
gui.label(box3, self, "Resorted features: %(resortedAttrs)s")
gui.label(box3, self, "Removed classes: %(removedClasses)s")
gui.label(box3, self, "Reduced classes: %(reducedClasses)s")
gui.label(box3, self, "Resorted classes: %(resortedClasses)s")
gui.auto_commit(self.controlArea, self, "autoSend", "Send Data",
checkbox_label="Send automatically ",
orientation="horizontal")
gui.rubber(self.controlArea)
def setData(self, dataset):
if dataset is not None:
self.data = dataset
self.unconditional_commit()
else:
self.removedAttrs = "-"
self.reducedAttrs = "-"
self.resortedAttrs = "-"
self.removedClasses = "-"
self.reducedClasses = "-"
self.resortedClasses = "-"
self.send("Data", None)
self.data = None
def optionsChanged(self):
self.commit()
def commit(self):
if self.data is None:
return
attr_flags = sum([Remove.SortValues * self.sortValues,
Remove.RemoveConstant * self.removeAttributes,
Remove.RemoveUnusedValues * self.removeValues])
class_flags = sum([Remove.SortValues * self.sortClasses,
Remove.RemoveConstant * self.removeClassAttribute,
Remove.RemoveUnusedValues * self.removeClasses])
remover = Remove(attr_flags, class_flags)
data = remover(self.data)
attr_res, class_res = remover.attr_results, remover.class_results
self.removedAttrs = attr_res['removed']
self.reducedAttrs = attr_res['reduced']
self.resortedAttrs = attr_res['sorted']
self.removedClasses = class_res['removed']
self.reducedClasses = class_res['reduced']
self.resortedClasses = class_res['sorted']
self.send("Data", data)
if __name__ == "__main__":
appl = QtGui.QApplication([])
ow = OWPurgeDomain()
data = Table("car.tab")
subset = [inst for inst in data
if inst["buying"] == "v-high"]
subset = Table(data.domain, subset)
# The "buying" should be removed and the class "y" reduced
ow.setData(subset)
ow.show()
appl.exec_()
ow.saveSettings()
| hugobuddel/orange3 | Orange/widgets/data/owpurgedomain.py | Python | gpl-3.0 | 4,905 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changeset', '0021_auto_20160222_2345'),
]
operations = [
migrations.AlterField(
model_name='suspicionreasons',
name='name',
field=models.CharField(max_length=255, db_index=True),
),
]
| batpad/osmcha-django | osmchadjango/changeset/migrations/0022_auto_20160222_2358.py | Python | gpl-3.0 | 428 |
#!/usr/bin/python
##############################################################################################
# Copyright (C) 2014 Pier Luigi Ventre - (Consortium GARR and University of Rome "Tor Vergata")
# Copyright (C) 2014 Giuseppe Siracusano, Stefano Salsano - (CNIT and University of Rome "Tor Vergata")
# www.garr.it - www.uniroma2.it/netgroup - www.cnit.it
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Topology Parser Utils.
#
# @author Pier Luigi Ventre <[email protected]>
# @author Giuseppe Siracusano <[email protected]>
# @author Stefano Salsano <[email protected]>
class Subnet:
def __init__(self, Type=None):
self.nodes = []
self.links = []
def appendLink(self, link):
if link[0] not in self.nodes:
self.nodes.append(link[0])
if link[1] not in self.nodes:
self.nodes.append(link[1])
self.links.append(link)
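# Minimal usage sketch (illustrative addition, not part of the original API):
# appendLink records every link but adds each endpoint node only once.
if __name__ == '__main__':
    subnet = Subnet()
    subnet.appendLink(('r1', 'r2'))
    subnet.appendLink(('r2', 'r3'))
    assert subnet.nodes == ['r1', 'r2', 'r3']
    assert len(subnet.links) == 2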
| netgroup/Dreamer-Topology-Parser | topo_parser_utils.py | Python | apache-2.0 | 1,368 |
import pytest
import os
from mock import patch, call, Mock, MagicMock
import blt.environment as env
class Commands(env.Commander):
def visible_command(self):
pass
def _hidden_command(self):
pass
# -- Set Fixtures -------------------------------------------------------------
@pytest.fixture
def cmd_center():
filepath = os.path.abspath(os.path.dirname(__file__) + '/beltenvtest.py')
return env.CommandCenter(filepath)
@pytest.fixture
def commander_class():
return env.Commander
@pytest.fixture
def commander_subclass():
return Commands
@pytest.fixture
def commander_instance():
return Commands(None)
# -- Test Cases! --------------------------------------------------------------
def test_get_tool_config(cmd_center):
assert cmd_center.config['staging']['heroku']['app'] == 'pubweb-staging'
def test_loaded_commands(cmd_center):
# we should have the following commands:
# - standard_command
# - default_command
# - command_with_aliases
# - some_really_long_aliased_command
# - siamese (alias to command_with_aliases)
# - twin (alias to command_with_aliases)
# - srl (alias to some_really_long_aliased_command)
# - importedtest
# == 7 total
assert len(cmd_center.commands) == 7
assert sorted(cmd_center.commands.keys()) == [ 'command_with_aliases'
, 'default_command'
, 'importedtest.imported_alias'
, 'importedtest.imported_command'
, 'importedtest.imported_default'
, 'some_really_long_aliased_command'
, 'standard_command'
]
def test_iscommander(commander_subclass):
assert env.iscommander(commander_subclass)
def test_iscommander_skips_commander_base(commander_class):
assert env.iscommander(commander_class) == False
def test_iscommandmethod(commander_instance):
assert env.iscommandmethod(commander_instance.visible_command)
def test_prod_check_run(cmd_center):
env.prod_check = Mock()
cmd_center.run('production', 'default_command')
env.prod_check.assert_called_once_with('default_command')
| dencold/blt | blt/test/test_environment.py | Python | mit | 2,376 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'libs/'))
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
| Praxyk/Praxyk-DevOps | server/_fix_path_.py | Python | gpl-2.0 | 150 |
# Copyright 2016 - 2018 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
from pkg_resources import resource_filename
import marv_node.testing
from marv_node.testing import make_dataset, run_nodes, temporary_directory
from marv_robotics.detail import bagmeta_table as node
from marv_store import Store
class TestCase(marv_node.testing.TestCase):
# TODO: Generate bags instead, but with connection info!
BAGS = [
resource_filename('marv_node.testing._robotics_tests', 'data/test_0.bag'),
resource_filename('marv_node.testing._robotics_tests', 'data/test_1.bag'),
]
async def test_node(self):
with temporary_directory() as storedir:
store = Store(storedir, {})
dataset = make_dataset(self.BAGS)
store.add_dataset(dataset)
streams = await run_nodes(dataset, [node], store)
self.assertNodeOutput(streams[0], node)
# TODO: test also header
| ternaris/marv-robotics | code/marv/marv_node/testing/_robotics_tests/test_widget_bagmeta_table.py | Python | agpl-3.0 | 954 |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas import (
Index,
NaT,
Timedelta,
TimedeltaIndex,
Timestamp,
notna,
timedelta_range,
to_timedelta,
)
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem_slice_keeps_name(self):
# GH#4226
tdi = timedelta_range("1d", "5d", freq="H", name="timebucket")
assert tdi[1:].name == tdi.name
def test_getitem(self):
idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == Timedelta("1 day")
result = idx[0:5]
expected = timedelta_range("1 day", "5 day", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = timedelta_range("1 day", "9 day", freq="2D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = timedelta_range("12 day", "24 day", freq="3D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = TimedeltaIndex(
["5 day", "4 day", "3 day", "2 day", "1 day"], freq="-1D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
@pytest.mark.parametrize(
"key",
[
Timestamp("1970-01-01"),
Timestamp("1970-01-02"),
datetime(1970, 1, 1),
Timestamp("1970-01-03").to_datetime64(),
# non-matching NA values
np.datetime64("NaT"),
],
)
def test_timestamp_invalid_key(self, key):
# GH#20464
tdi = timedelta_range(0, periods=10)
with pytest.raises(KeyError, match=re.escape(repr(key))):
tdi.get_loc(key)
class TestGetLoc:
@pytest.mark.filterwarnings("ignore:Passing method:FutureWarning")
def test_get_loc(self):
idx = to_timedelta(["0 days", "1 days", "2 days"])
for method in [None, "pad", "backfill", "nearest"]:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
assert idx.get_loc(idx[1], "pad", tolerance=Timedelta(0)) == 1
assert idx.get_loc(idx[1], "pad", tolerance=np.timedelta64(0, "s")) == 1
assert idx.get_loc(idx[1], "pad", tolerance=timedelta(0)) == 1
with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
idx.get_loc(idx[1], method="nearest", tolerance="foo")
with pytest.raises(ValueError, match="tolerance size must match"):
idx.get_loc(
idx[1],
method="nearest",
tolerance=[
Timedelta(0).to_timedelta64(),
Timedelta(0).to_timedelta64(),
],
)
for method, loc in [("pad", 1), ("backfill", 2), ("nearest", 1)]:
assert idx.get_loc("1 day 1 hour", method) == loc
# GH 16909
assert idx.get_loc(idx[1].to_timedelta64()) == 1
# GH 16896
assert idx.get_loc("0 days") == 0
def test_get_loc_nat(self):
tidx = TimedeltaIndex(["1 days 01:00:00", "NaT", "2 days 01:00:00"])
assert tidx.get_loc(NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float("nan")) == 1
assert tidx.get_loc(np.nan) == 1
class TestGetIndexer:
def test_get_indexer(self):
idx = to_timedelta(["0 days", "1 days", "2 days"])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
target = to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
)
res = idx.get_indexer(target, "nearest", tolerance=Timedelta("1 hour"))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
class TestWhere:
def test_where_doesnt_retain_freq(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = [True, True, False]
expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx")
result = tdi.where(cond, tdi[::-1])
tm.assert_index_equal(result, expected)
def test_where_invalid_dtypes(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
tail = tdi[2:].tolist()
i2 = Index([NaT, NaT] + tail)
mask = notna(i2)
expected = Index([NaT.value, NaT.value] + tail, dtype=object, name="idx")
assert isinstance(expected[0], int)
result = tdi.where(mask, i2.asi8)
tm.assert_index_equal(result, expected)
ts = i2 + Timestamp.now()
expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
per = (i2 + Timestamp.now()).to_period("D")
expected = Index([per[0], per[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, per)
tm.assert_index_equal(result, expected)
ts = Timestamp.now()
expected = Index([ts, ts] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
def test_where_mismatched_nat(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = np.array([True, False, False])
dtnat = np.datetime64("NaT", "ns")
expected = Index([tdi[0], dtnat, dtnat], dtype=object, name="idx")
assert expected[2] is dtnat
result = tdi.where(cond, dtnat)
tm.assert_index_equal(result, expected)
class TestTake:
def test_take(self):
# GH 10295
idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == Timedelta("1 day")
result = idx.take([-1])
assert result == Timedelta("31 day")
result = idx.take([0, 1, 2])
expected = timedelta_range("1 day", "3 day", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = timedelta_range("1 day", "5 day", freq="2D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = timedelta_range("8 day", "2 day", freq="-3D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(["4 day", "3 day", "6 day"], name="idx")
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(["29 day", "3 day", "6 day"], name="idx")
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
def test_take_equiv_getitem(self):
tds = ["1day 02:00:00", "1 day 04:00:00", "1 day 10:00:00"]
idx = timedelta_range(start="1d", end="2d", freq="H", name="idx")
expected = TimedeltaIndex(tds, freq=None, name="idx")
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = TimedeltaIndex(["1 days", "2 days", "3 days"], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = TimedeltaIndex(["2 days", "1 days", "NaT"], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestMaybeCastSliceBound:
@pytest.fixture(params=["increasing", "decreasing", None])
def monotonic(self, request):
return request.param
@pytest.fixture
def tdi(self, monotonic):
tdi = timedelta_range("1 Day", periods=10)
if monotonic == "decreasing":
tdi = tdi[::-1]
elif monotonic is None:
taker = np.arange(10, dtype=np.intp)
np.random.shuffle(taker)
tdi = tdi.take(taker)
return tdi
def test_maybe_cast_slice_bound_invalid_str(self, tdi):
# test the low-level _maybe_cast_slice_bound and that we get the
# expected exception+message all the way up the stack
msg = (
"cannot do slice indexing on TimedeltaIndex with these "
r"indexers \[foo\] of type str"
)
with pytest.raises(TypeError, match=msg):
tdi._maybe_cast_slice_bound("foo", side="left")
with pytest.raises(TypeError, match=msg):
tdi.get_slice_bound("foo", side="left")
with pytest.raises(TypeError, match=msg):
tdi.slice_locs("foo", None, None)
def test_slice_invalid_str_with_timedeltaindex(
self, tdi, frame_or_series, indexer_sl
):
obj = frame_or_series(range(10), index=tdi)
msg = (
"cannot do slice indexing on TimedeltaIndex with these "
r"indexers \[foo\] of type str"
)
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)["foo":]
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)["foo":-1]
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)[:"foo"]
with pytest.raises(TypeError, match=msg):
indexer_sl(obj)[tdi[0] : "foo"]
| jorisvandenbossche/pandas | pandas/tests/indexes/timedeltas/test_indexing.py | Python | bsd-3-clause | 12,228 |
"""
Stub version control system, for testing purposes
"""
from __future__ import print_function
from rez.release_vcs import ReleaseVCS
from rez.utils.logging_ import print_warning
from rez.utils.yaml import dump_yaml
from rez.vendor import yaml
import os.path
import time
class StubReleaseVCS(ReleaseVCS):
"""A release VCS that doesn't really do anything. Used by unit tests.
A writable '.stub' file must be present in the project root. Any created
tags are written to this yaml file.
"""
def __init__(self, pkg_root, vcs_root=None):
super(StubReleaseVCS, self).__init__(pkg_root, vcs_root=vcs_root)
self.time = int(time.time())
@classmethod
def name(cls):
return "stub"
@classmethod
def is_valid_root(cls, path):
return os.path.exists(os.path.join(path, '.stub'))
@classmethod
def search_parents_for_root(cls):
return False
def validate_repostate(self):
pass
def get_current_revision(self):
return self.time
def get_changelog(self, previous_revision=None, max_revisions=None):
if previous_revision:
if isinstance(previous_revision, int):
seconds = self.time - previous_revision
return "This commit was %d seconds after the last" % seconds
else:
return "There is a previous commit from a different vcs"
else:
return "This is the first commit"
def tag_exists(self, tag_name):
data = self._read_stub()
return tag_name in data.get("tags", [])
def create_release_tag(self, tag_name, message=None):
data = self._read_stub()
if "tags" not in data:
data["tags"] = {}
elif tag_name in data["tags"]:
print_warning("Skipped tag creation, tag '%s' already exists" % tag_name)
return
print("Creating tag '%s'..." % tag_name)
data["tags"][tag_name] = message
self._write_stub(data)
def _read_stub(self):
with open(os.path.join(self.vcs_root, '.stub')) as f:
return yaml.load(f.read()) or {}
def _write_stub(self, data):
with open(os.path.join(self.vcs_root, '.stub'), 'w') as f:
f.write(dump_yaml(data))
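# Illustrative note (added): given create_release_tag above, a '.stub' file
# after two releases would contain YAML roughly like the following
# (tag names and messages are hypothetical):
#
#   tags:
#     1.0.0: first release
#     1.1.0: bugfix release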
def register_plugin():
return StubReleaseVCS
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| cwmartin/rez | src/rezplugins/release_vcs/stub.py | Python | lgpl-3.0 | 3,019 |
from distutils.core import setup
setup(
name='django-submodel',
version='0.1',
license='MIT',
author='Li Meng',
author_email='[email protected]',
packages=['submodel'],
description='A Django model field which value works like a model instance and supports seamless inline editing in Django admin.',
long_description=open('README.rst').read(),
url='https://github.com/liokm/django-submodel',
download_url='https://github.com/liokm/django-submodel',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| liokm/django-submodel | setup.py | Python | mit | 885 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
#from lib_output import *
from collections import defaultdict
# converts a string in the format dd/mm/yyyy to python datetime
def str_date_to_datetime(str_date):
return datetime.datetime.strptime(str_date, '%d/%m/%Y')
# converts a timestamp to a string in the format dd/mm/yyyy
def timestamp_to_str_date(str_timestamp):
return datetime.datetime.fromtimestamp(int(str_timestamp)).strftime('%d/%m/%Y')
# converts a timestamp to a python datetime
def timestamp_to_datetime(str_timestamp):
return datetime.datetime.fromtimestamp(int(str_timestamp))
# a datetime o a date string in the format dd/mm/yyyy
def datetime_to_str_date(datetime):
return datetime.strftime('%d/%m/%Y')
# a datetime o a date string in the format dd/mm/yyyy HH
def datetime_to_str_date_hour(datetime):
return datetime.strftime('%d/%m/%Y %H')
# normalizes posts by date by adding the missing days in a range of days.
# e.g. if a list of dates has 2 posts on 17/03/2013 and then jumps to 5 posts on 19/03/2013,
# the 18/03/2013 data point would not exist; this function fills such empty days with zero
def normalize_posts_by_date(dict_int_dates):
list_str_dates = dict_int_dates.keys()
list_str_timestamps = []
for str_date in list_str_dates:
timestamp = datetime.datetime.strptime(str_date, '%d/%m/%Y')
list_str_timestamps.append(timestamp)
max_date = max(list_str_timestamps)
time_step = min(list_str_timestamps)
delta = datetime.timedelta(1)
while time_step < max_date:
str_normal_date = time_step.strftime('%d/%m/%Y')
if str_normal_date in list_str_dates:
pass
else:
dict_int_dates[str_normal_date] = 0
time_step = time_step + delta
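# Illustrative example (added; dates are hypothetical):
#
# counts = {'17/03/2013': 2, '19/03/2013': 5}
# normalize_posts_by_date(counts)
# # counts now also maps the missing '18/03/2013' to 0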
# receives a list of datetimes and returns a list of all the days between the minimum and the maximum day
# the returned list is in the format dd/mm/yyyy
def fill_days_list(datetimes_list):
max_date = max(datetimes_list)
delta = datetime.timedelta(1) #one day delta
complete_dates_list = []
temp_date = min(datetimes_list)
while temp_date < max_date:
complete_dates_list.append(temp_date)
temp_date = temp_date + delta
return [datetime_to_str_date(x) for x in complete_dates_list]
# creates the comments per day timeline
def comments_per_day(list_datetime_commments):
list_str_date = fill_days_list(list_datetime_commments)
dict_int_str_date = defaultdict(int)
for str_day in list_str_date:
dict_int_str_date[str_day] += 0
for datetime in list_datetime_commments:
str_date = datetime_to_str_date(datetime)
dict_int_str_date[str_date] += 1
return dict_int_str_date
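# Illustrative example (added; dates are hypothetical): two comments on
# 17/03/2013 and one on 19/03/2013 produce a timeline whose gap day is zero:
#
# import datetime
# stamps = [datetime.datetime(2013, 3, 17), datetime.datetime(2013, 3, 17),
#           datetime.datetime(2013, 3, 19)]
# comments_per_day(stamps)
# # => {'17/03/2013': 2, '18/03/2013': 0, '19/03/2013': 1}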
#-----------------------------------------
# receives a list of timestamp and returns a list of all the days between the minimum and the maximum day
# the returned list is in the format dd/mm/yyyy
def fill_hours_list(datetimes_list):
max_date = max(datetimes_list)
	delta = datetime.timedelta(seconds=3600) # one hour delta
complete_dates_list = []
temp_date = min(datetimes_list)
while temp_date < max_date:
complete_dates_list.append(temp_date)
temp_date = temp_date + delta
return [datetime_to_str_date_hour(x) for x in complete_dates_list]
# creates the comments per hour timeline
def comments_per_hour(list_datetime_commments):
list_str_date = fill_hours_list(list_datetime_commments)
dict_int_str_date = defaultdict(int)
for str_day_hour in list_str_date:
dict_int_str_date[str_day_hour] += 0
for datetime in list_datetime_commments:
str_date_hour = datetime_to_str_date_hour(datetime)
dict_int_str_date[str_date_hour] += 1
return dict_int_str_date
| ufeslabic/parse-facebook | lib_time.py | Python | mit | 3,565 |
import os
module = None
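# Import every other .py module in this package at import time so that their
# top-level definitions (e.g. model classes) register themselves.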
for module in os.listdir(os.path.dirname(__file__)):
if module == '__init__.py' or module[-3:] != '.py':
continue
__import__(module[:-3], locals(), globals())
del module
| sdgdsffdsfff/redis-ctl | models/__init__.py | Python | mit | 211 |
"""Contains all available scenarios in Flow."""
# base scenario class
from flow.scenarios.base_scenario import Scenario
# custom scenarios
from flow.scenarios.bay_bridge import BayBridgeScenario
from flow.scenarios.bay_bridge_toll import BayBridgeTollScenario
from flow.scenarios.bottleneck import BottleneckScenario
from flow.scenarios.figure_eight import Figure8Scenario
from flow.scenarios.grid import SimpleGridScenario
from flow.scenarios.highway import HighwayScenario
from flow.scenarios.loop import LoopScenario
from flow.scenarios.merge import MergeScenario
from flow.scenarios.loop_merge import TwoLoopsOneMergingScenario
from flow.scenarios.multi_loop import MultiLoopScenario
from flow.scenarios.minicity import MiniCityScenario
__all__ = [
"Scenario", "BayBridgeScenario", "BayBridgeTollScenario",
"BottleneckScenario", "Figure8Scenario", "SimpleGridScenario",
"HighwayScenario", "LoopScenario", "MergeScenario",
"TwoLoopsOneMergingScenario", "MultiLoopScenario", "MiniCityScenario"
]
| cathywu/flow | flow/scenarios/__init__.py | Python | mit | 1,017 |
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='evention <[email protected]>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[evention] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
| EricZaporzan/evention | config/settings/production.py | Python | mit | 6,730 |
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.thermo.thermodata` module.
"""
import unittest
import math
import numpy
from rmgpy.thermo.thermodata import ThermoData
import rmgpy.constants as constants
################################################################################
class TestThermoData(unittest.TestCase):
"""
Contains unit tests of the :class:`ThermoData` class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.H298 = -32.9725
self.S298 = 27.5727
self.Tdata = numpy.array([300,400,500,600,800,1000,1500])
self.Cpdata = numpy.array([6.3827,7.80327,9.22175,10.5528,12.8323,14.6013,17.4089])
self.Cp0 = 4.0
self.CpInf = 21.5
self.Tmin = 100.
self.Tmax = 3000.
self.E0 = -782292.
self.comment = 'C2H6'
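        # Note (added for clarity): the raw numbers above are dimensionless
        # ratios (Cp/R, S/R, H/(R*298)); the constructor arguments below
        # scale them by the gas constant R into the stated SI units.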
self.thermodata = ThermoData(
Tdata = (self.Tdata,"K"),
Cpdata = (self.Cpdata*constants.R,"J/(mol*K)"),
H298 = (self.H298*0.001*constants.R*298.,"kJ/mol"),
S298 = (self.S298*constants.R,"J/(mol*K)"),
Cp0 = (self.Cp0*constants.R,"J/(mol*K)"),
CpInf = (self.CpInf*constants.R,"J/(mol*K)"),
Tmin = (self.Tmin,"K"),
Tmax = (self.Tmax,"K"),
E0 = (self.E0,'J/mol'),
comment = self.comment,
)
def test_Tdata(self):
"""
Test that the ThermoData Tdata property was properly set.
"""
self.assertEqual(self.thermodata.Tdata.value_si.shape, self.Tdata.shape)
for T, T0 in zip(self.thermodata.Tdata.value_si, self.Tdata):
self.assertAlmostEqual(T, T0, 4)
def test_Cpdata(self):
"""
Test that the ThermoData Cpdata property was properly set.
"""
self.assertEqual(self.thermodata.Cpdata.value_si.shape, self.Cpdata.shape)
for Cp, Cp0 in zip(self.thermodata.Cpdata.value_si / constants.R, self.Cpdata):
self.assertAlmostEqual(Cp, Cp0, 4)
def test_H298(self):
"""
Test that the ThermoData H298 property was properly set.
"""
self.assertAlmostEqual(self.thermodata.H298.value_si / constants.R / 298., self.H298, 4)
def test_S298(self):
"""
Test that the ThermoData S298 property was properly set.
"""
self.assertAlmostEqual(self.thermodata.S298.value_si / constants.R, self.S298, 4)
def test_Cp0(self):
"""
Test that the ThermoData Cp0 property was properly set.
"""
self.assertAlmostEqual(self.thermodata.Cp0.value_si / constants.R, self.Cp0, 4)
def test_CpInf(self):
"""
Test that the ThermoData CpInf property was properly set.
"""
self.assertAlmostEqual(self.thermodata.CpInf.value_si / constants.R, self.CpInf, 4)
def test_Tmin(self):
"""
Test that the ThermoData Tmin property was properly set.
"""
self.assertAlmostEqual(self.thermodata.Tmin.value_si, self.Tmin, 6)
def test_Tmax(self):
"""
Test that the ThermoData Tmax property was properly set.
"""
self.assertAlmostEqual(self.thermodata.Tmax.value_si, self.Tmax, 6)
def test_E0(self):
"""
Test that the ThermoData E0 property was properly set.
"""
self.assertAlmostEqual(self.thermodata.E0.value_si, self.E0, 6)
def test_Comment(self):
"""
Test that the ThermoData comment property was properly set.
"""
self.assertEqual(self.thermodata.comment, self.comment)
def test_isTemperatureValid(self):
"""
Test the ThermoData.isTemperatureValid() method.
"""
Tdata = [200,400,600,800,1000,1200,1400,1600,1800,2000]
validdata = [True,True,True,True,True,True,True,True,True,True]
for T, valid in zip(Tdata, validdata):
valid0 = self.thermodata.isTemperatureValid(T)
self.assertEqual(valid0, valid)
def test_getHeatCapacity(self):
"""
Test the ThermoData.getHeatCapacity() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
Cpexplist = numpy.array([4.96208, 7.80327, 10.5528, 12.8323, 14.6013, 15.7243, 16.8473, 17.9704, 19.0934, 20.2165]) * constants.R
for T, Cpexp in zip(Tlist, Cpexplist):
Cpact = self.thermodata.getHeatCapacity(T)
self.assertAlmostEqual(Cpexp, Cpact, 2)
def test_getEnthalpy(self):
"""
Test the ThermoData.getEnthalpy() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
Hexplist = numpy.array([-51.9015, -22.7594, -12.1063, -6.15660, -2.18192, 0.708869, 2.93415, 4.74350, 6.27555, 7.61349]) * constants.R * Tlist
for T, Hexp in zip(Tlist, Hexplist):
Hact = self.thermodata.getEnthalpy(T)
self.assertAlmostEqual(Hexp, Hact, delta=1e0)
def test_getEntropy(self):
"""
Test the ThermoData.getEntropy() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
Sexplist = numpy.array([25.3347, 29.6460, 33.3386, 36.6867, 39.7402, 42.5016, 45.0098, 47.3328, 49.5142, 51.5841]) * constants.R
for T, Sexp in zip(Tlist, Sexplist):
Sact = self.thermodata.getEntropy(T)
self.assertAlmostEqual(Sexp, Sact, 3)
def test_getFreeEnergy(self):
"""
Test the ThermoData.getFreeEnergy() method.
"""
Tlist = numpy.array([200,400,600,800,1000,1200,1400,1600,1800,2000])
for T in Tlist:
Gexp = self.thermodata.getEnthalpy(T) - T * self.thermodata.getEntropy(T)
Gact = self.thermodata.getFreeEnergy(T)
self.assertAlmostEqual(Gexp, Gact, 3)
def test_pickle(self):
"""
Test that a ThermoData object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
thermodata = cPickle.loads(cPickle.dumps(self.thermodata))
self.assertEqual(self.thermodata.Tdata.value.shape, thermodata.Tdata.value.shape)
for T, T0 in zip(self.thermodata.Tdata.value, thermodata.Tdata.value):
self.assertAlmostEqual(T, T0, 4)
self.assertEqual(self.thermodata.Tdata.units, thermodata.Tdata.units)
self.assertEqual(self.thermodata.Cpdata.value.shape, thermodata.Cpdata.value.shape)
for Cp, Cp0 in zip(self.thermodata.Cpdata.value, thermodata.Cpdata.value):
self.assertAlmostEqual(Cp, Cp0, 3)
self.assertEqual(self.thermodata.Cpdata.units, thermodata.Cpdata.units)
self.assertAlmostEqual(self.thermodata.H298.value, thermodata.H298.value, 4)
self.assertEqual(self.thermodata.H298.units, thermodata.H298.units)
self.assertAlmostEqual(self.thermodata.S298.value, thermodata.S298.value, 2)
self.assertEqual(self.thermodata.S298.units, thermodata.S298.units)
self.assertAlmostEqual(self.thermodata.Cp0.value, thermodata.Cp0.value, 4)
self.assertEqual(self.thermodata.Cp0.units, thermodata.Cp0.units)
self.assertAlmostEqual(self.thermodata.CpInf.value, thermodata.CpInf.value, 3)
self.assertEqual(self.thermodata.CpInf.units, thermodata.CpInf.units)
self.assertAlmostEqual(self.thermodata.Tmin.value, thermodata.Tmin.value, 4)
self.assertEqual(self.thermodata.Tmin.units, thermodata.Tmin.units)
self.assertAlmostEqual(self.thermodata.Tmax.value, thermodata.Tmax.value, 4)
self.assertEqual(self.thermodata.Tmax.units, thermodata.Tmax.units)
self.assertAlmostEqual(self.thermodata.E0.value, thermodata.E0.value, 4)
self.assertEqual(self.thermodata.E0.units, thermodata.E0.units)
self.assertEqual(self.thermodata.comment, thermodata.comment)
def test_repr(self):
"""
Test that a ThermoData object can be successfully reconstructed from its
repr() output with no loss of information.
"""
thermodata = None
exec('thermodata = {0!r}'.format(self.thermodata))
self.assertEqual(self.thermodata.Tdata.value.shape, thermodata.Tdata.value.shape)
for T, T0 in zip(self.thermodata.Tdata.value, thermodata.Tdata.value):
self.assertAlmostEqual(T, T0, 4)
self.assertEqual(self.thermodata.Tdata.units, thermodata.Tdata.units)
self.assertEqual(self.thermodata.Cpdata.value.shape, thermodata.Cpdata.value.shape)
for Cp, Cp0 in zip(self.thermodata.Cpdata.value, thermodata.Cpdata.value):
self.assertAlmostEqual(Cp, Cp0, 3)
self.assertEqual(self.thermodata.Cpdata.units, thermodata.Cpdata.units)
self.assertAlmostEqual(self.thermodata.H298.value, thermodata.H298.value, 4)
self.assertEqual(self.thermodata.H298.units, thermodata.H298.units)
self.assertAlmostEqual(self.thermodata.S298.value, thermodata.S298.value, 2)
self.assertEqual(self.thermodata.S298.units, thermodata.S298.units)
self.assertAlmostEqual(self.thermodata.Cp0.value, thermodata.Cp0.value, 4)
self.assertEqual(self.thermodata.Cp0.units, thermodata.Cp0.units)
self.assertAlmostEqual(self.thermodata.CpInf.value, thermodata.CpInf.value, 3)
self.assertEqual(self.thermodata.CpInf.units, thermodata.CpInf.units)
self.assertAlmostEqual(self.thermodata.Tmin.value, thermodata.Tmin.value, 4)
self.assertEqual(self.thermodata.Tmin.units, thermodata.Tmin.units)
self.assertAlmostEqual(self.thermodata.Tmax.value, thermodata.Tmax.value, 4)
self.assertEqual(self.thermodata.Tmax.units, thermodata.Tmax.units)
self.assertAlmostEqual(self.thermodata.E0.value, thermodata.E0.value, 4)
self.assertEqual(self.thermodata.E0.units, thermodata.E0.units)
self.assertEqual(self.thermodata.comment, thermodata.comment)
| KEHANG/RMG-Py | rmgpy/thermo/thermodataTest.py | Python | mit | 11,500 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Zhang ZY<http://idupx.blogspot.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.core
import sys
# Importing setuptools adds some features like "setup.py develop", but
# it's optional so swallow the error if it's not there.
try:
import setuptools
except ImportError:
pass
kwargs = {}
major, minor = sys.version_info[:2]
python_26 = (major > 2 or (major == 2 and minor >= 6))
version = "1.0.0"
if major >= 3:
import setuptools # setuptools is required for use_2to3
kwargs["use_2to3"] = True
distutils.core.setup(
name="tns",
version=version,
author="Zhang ZY",
author_email="[email protected]",
url="https://github.com/bufferx/twork",
license="http://www.apache.org/licenses/LICENSE-2.0",
description="tns is a neTwork server framework based on twork",
packages = setuptools.find_packages(exclude=["test", "*.log"]),
package_data = {
"tns": ["www/static/favicon.ico"],
},
entry_points = {
'console_scripts': [
'tnsd = tns.bin.tnsd:main',
],
},
**kwargs
)
| bufferx/tns | setup.py | Python | apache-2.0 | 2,220 |
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import unslug
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import time
import werkzeug.urls
class WebsiteMembership(http.Controller):
_references_per_page = 20
@http.route([
'/members',
'/members/page/<int:page>',
'/members/association/<membership_id>',
'/members/association/<membership_id>/page/<int:page>',
'/members/country/<int:country_id>',
'/members/country/<country_name>-<int:country_id>',
'/members/country/<int:country_id>/page/<int:page>',
'/members/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>',
'/members/association/<membership_id>/country/<int:country_id>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<int:country_id>/page/<int:page>',
], type='http', auth="public", website=True)
def members(self, membership_id=None, country_name=None, country_id=0, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
product_obj = request.registry['product.product']
country_obj = request.registry['res.country']
membership_line_obj = request.registry['membership.membership_line']
partner_obj = request.registry['res.partner']
post_name = post.get('name', '')
current_country = None
# base domain for groupby / searches
base_line_domain = [("partner.website_published", "=", True), ('state', 'in', ['free', 'paid'])]
if membership_id and membership_id != 'free':
membership_id = int(membership_id)
today = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
base_line_domain += [
('membership_id', '=', membership_id), ('date_to', '>=', today),
('date_from', '<', today), ('state', '=', 'paid')
]
membership = product_obj.browse(cr, uid, membership_id, context=context)
else:
membership = None
if post_name:
base_line_domain += ['|', ('partner.name', 'ilike', post_name),
('partner.website_description', 'ilike', post_name)]
# group by country, based on all customers (base domain)
if membership_id != 'free':
membership_line_ids = membership_line_obj.search(cr, SUPERUSER_ID, base_line_domain, context=context)
country_domain = [('member_lines', 'in', membership_line_ids)]
else:
membership_line_ids = []
country_domain = [('membership_state', '=', 'free')]
if post_name:
country_domain += ['|', ('name', 'ilike', post_name),
('website_description', 'ilike', post_name)]
countries = partner_obj.read_group(
cr, SUPERUSER_ID, country_domain + [("website_published", "=", True)], ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_total = sum(country_dict['country_id_count'] for country_dict in countries)
line_domain = list(base_line_domain)
if country_id:
line_domain.append(('partner.country_id', '=', country_id))
current_country = country_obj.read(cr, uid, country_id, ['id', 'name'], context)
if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
countries.append({
'country_id_count': 0,
'country_id': (country_id, current_country["name"])
})
        countries = filter(lambda d: d['country_id'], countries)
countries.sort(key=lambda d: d['country_id'][1])
countries.insert(0, {
'country_id_count': countries_total,
'country_id': (0, _("All Countries"))
})
# format domain for group_by and memberships
membership_ids = product_obj.search(cr, uid, [('membership', '=', True)], order="website_sequence", context=context)
memberships = product_obj.browse(cr, uid, membership_ids, context=context)
# make sure we don't access to lines with unpublished membershipts
line_domain.append(('membership_id', 'in', membership_ids))
limit = self._references_per_page
offset = limit * (page - 1)
count_members = 0
membership_line_ids = []
# displayed non-free membership lines
if membership_id != 'free':
count_members = membership_line_obj.search_count(cr, SUPERUSER_ID, line_domain, context=context)
if offset <= count_members:
membership_line_ids = tuple(membership_line_obj.search(cr, SUPERUSER_ID, line_domain, offset, limit, context=context))
membership_lines = membership_line_obj.browse(cr, uid, membership_line_ids, context=context)
# TODO: Following line can be deleted in master. Kept for retrocompatibility.
membership_lines = sorted(membership_lines, key=lambda x: x.membership_id.website_sequence)
page_partner_ids = set(m.partner.id for m in membership_lines)
google_map_partner_ids = []
if request.env.ref('website_membership.opt_index_google_map').customize_show:
membership_lines_ids = membership_line_obj.search(cr, uid, line_domain, context=context)
            google_map_partner_ids = membership_line_obj.get_published_companies(cr, uid, membership_lines_ids, limit=2000, context=context)
search_domain = [('membership_state', '=', 'free'), ('website_published', '=', True)]
if post_name:
search_domain += ['|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name)]
if country_id:
search_domain += [('country_id', '=', country_id)]
free_partner_ids = partner_obj.search(cr, SUPERUSER_ID, search_domain, context=context)
memberships_data = []
for membership_record in memberships:
memberships_data.append({'id': membership_record.id, 'name': membership_record.name})
memberships_partner_ids = {}
for line in membership_lines:
memberships_partner_ids.setdefault(line.membership_id.id, []).append(line.partner.id)
if free_partner_ids:
memberships_data.append({'id': 'free', 'name': _('Free Members')})
if not membership_id or membership_id == 'free':
if count_members < offset + limit:
free_start = max(offset - count_members, 0)
free_end = max(offset + limit - count_members, 0)
memberships_partner_ids['free'] = free_partner_ids[free_start:free_end]
page_partner_ids |= set(memberships_partner_ids['free'])
google_map_partner_ids += free_partner_ids[:2000-len(google_map_partner_ids)]
count_members += len(free_partner_ids)
google_map_partner_ids = ",".join(map(str, google_map_partner_ids))
partners = { p.id: p for p in partner_obj.browse(request.cr, SUPERUSER_ID, list(page_partner_ids), request.context)}
base_url = '/members%s%s' % ('/association/%s' % membership_id if membership_id else '',
'/country/%s' % country_id if country_id else '')
# request pager for lines
pager = request.website.pager(url=base_url, total=count_members, page=page, step=limit, scope=7, url_args=post)
values = {
'partners': partners,
'membership_lines': membership_lines, # TODO: This line can be deleted in master. Kept for retrocompatibility.
'memberships': memberships, # TODO: This line too.
'membership': membership, # TODO: This line too.
'memberships_data': memberships_data,
'memberships_partner_ids': memberships_partner_ids,
'membership_id': membership_id,
'countries': countries,
'current_country': current_country and [current_country['id'], current_country['name']] or None,
'current_country_id': current_country and current_country['id'] or 0,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'post': post,
'search': "?%s" % werkzeug.url_encode(post),
}
return request.website.render("website_membership.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/members/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, **post):
_, partner_id = unslug(partner_id)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {}
values['main_object'] = values['partner'] = partner
return request.website.render("website_membership.partner", values)
return self.members(**post)
| ingadhoc/odoo | addons/website_membership/controllers/main.py | Python | agpl-3.0 | 9,436 |
from screenlets.options import ColorOption, IntOption
fft = True
peak_heights = [ 0 for i in range( 256 ) ]
peak_acceleration = [ 0.0 for i in range( 256 ) ]
bar_color = ( 0.75, 0.75, 0.75, 0.65 )
bg_color = ( 0.25, 0.25, 0.25, 0.65 )
peak_color = ( 1.0, 1.0, 1.0, 0.8 )
n_cols = 15
col_width = 16
col_spacing = 1
n_rows = 22
row_height = 2
row_spacing = 1
def load_theme ( screenlet ):
screenlet.resize( n_cols * col_width + n_cols * col_spacing, 100 )
screenlet.add_option( ColorOption(
'Impulse', 'bar_color',
bar_color, 'Bar color',
'Example options group using color'
) )
screenlet.add_option( ColorOption(
'Impulse', 'peak_color',
peak_color, 'Peak color',
'Example options group using color')
)
screenlet.add_option( ColorOption(
'Impulse', 'bg_color',
bg_color, 'Background color',
'Example options group using color')
)
screenlet.add_option( IntOption(
'Impulse', 'n_cols',
n_cols, 'Number of columns',
'Example options group using integer',
min=1, max=256
) )
screenlet.add_option( IntOption(
'Impulse', 'col_width',
col_width, 'Column width',
'Example options group using integer',
min=1, max=256
) )
screenlet.add_option( IntOption(
'Impulse', 'col_spacing',
col_spacing, 'Column Spacing',
'Example options group using integer',
min=1, max=256
) )
screenlet.add_option( IntOption(
'Impulse', 'n_rows',
n_rows, 'Number of rows',
'Example options group using integer',
min=1, max=256
) )
screenlet.add_option( IntOption(
'Impulse', 'row_height',
row_height, 'Row height',
'Example options group using integer',
min=1, max=256
) )
screenlet.add_option( IntOption(
'Impulse', 'row_spacing',
row_spacing, 'Row Spacing',
'Example options group using integer',
min=1, max=256
) )
def on_after_set_attribute ( self, name, value, screenlet ):
setattr( self, name, value )
screenlet.resize( n_cols * ( col_width + col_spacing ), n_rows * ( row_height + row_spacing ) )
def on_draw ( audio_sample_array, cr, screenlet ):
freq = len( audio_sample_array ) / n_cols
cr.set_source_rgba( bar_color[ 0 ], bar_color[ 1 ], bar_color[ 2 ], bar_color[ 3 ] )
for i in range( 0, len( audio_sample_array ), freq ):
col = i / freq
rows = int( audio_sample_array[ i ] * ( n_rows - 2 ) )
for row in range( 0, rows ):
cr.rectangle(
col * ( col_width + col_spacing ),
screenlet.height - row * ( row_height + row_spacing ),
col_width, -row_height
)
cr.fill()
cr.set_source_rgba( bg_color[ 0 ], bg_color[ 1 ], bg_color[ 2 ], bg_color[ 3 ] )
for i in range( 0, len( audio_sample_array ), freq ):
col = i / freq
rows = int( audio_sample_array[ i ] * ( n_rows - 2 ) )
for row in range( rows, n_rows ):
cr.rectangle(
col * ( col_width + col_spacing ),
screenlet.height - row * ( row_height + row_spacing ),
col_width, -row_height
)
cr.fill()
cr.set_source_rgba( peak_color[ 0 ], peak_color[ 1 ], peak_color[ 2 ], peak_color[ 3 ] )
for i in range( 0, len( audio_sample_array ), freq ):
col = i / freq
rows = int( audio_sample_array[ i ] * ( n_rows - 2 ) )
if rows > peak_heights[ i ]:
peak_heights[ i ] = rows
peak_acceleration[ i ] = 0.0
else:
peak_acceleration[ i ] += .1
peak_heights[ i ] -= peak_acceleration[ i ]
if peak_heights[ i ] < 0:
peak_heights[ i ] = 0
cr.rectangle(
col * ( col_width + col_spacing ),
screenlet.height - peak_heights[ i ] * ( row_height + row_spacing ),
col_width, -row_height
)
cr.fill()
cr.fill( )
cr.stroke( )
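# A minimal, self-contained sketch (not part of the original theme) of the
# peak-decay rule used in on_draw above: a peak snaps up to any new maximum
# and otherwise falls with steadily increasing speed, like a VU-meter needle.
def decay_peak ( peak, accel, level, gravity=0.1 ):
    if level > peak:
        return level, 0.0 # snap up and reset the fall speed
    accel += gravity # the fall accelerates every frame
    return max( peak - accel, 0 ), accel # never fall below the baseline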
| kb3dow/dotfiles | conky/ConkyBar/Impulse/Themes/default/__init__.py | Python | gpl-3.0 | 3,535 |
# -*- Encoding: utf-8 -*-
###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This plugin keeps a database of larts, and larts with it.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.strike
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| bnrubin/ubuntu-bots | Lart/__init__.py | Python | gpl-2.0 | 2,491 |
#!/usr/bin/env python
def compteVrais( *liste ):
compte = 0
for i in liste:
if i:
compte += 1
return compte
plageValeurs = ( True, False )
print( "XOR - 2 opérandes" )
for a in plageValeurs:
for b in plageValeurs:
print( "%s ^ %s = %s"%( a, b, a^b ) )
print( "\nXOR - 3 opérandes" )
for a in plageValeurs:
for b in plageValeurs:
for c in plageValeurs:
nbVrais = compteVrais( a, b, c )
print( "%d vrais = %s = (%s ^ %s ^ %s) "%( nbVrais, a^b^c, a, b, c ) )
print( "\nXOR - 4 opérandes" )
for a in plageValeurs:
for b in plageValeurs:
for c in plageValeurs:
for d in plageValeurs:
nbVrais = compteVrais( a, b, c, d )
print( "%d vrais = %s = (%s ^ %s ^ %s ^ %s)"%( nbVrais, a^b^c^d, a, b, c, d ) )
| EricMinso/Scripts | ExemplesPython/xor-test.py | Python | gpl-3.0 | 749 |
#!/usr/bin/env python
from __future__ import print_function
import sys, textwrap
print()
print("# The tool was invoked with these arguments:")
print("# " + "\n# ".join(textwrap.wrap(str(sys.argv[1:]))))
| timodonnell/sefara | docs/example_tool.py | Python | apache-2.0 | 205 |
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from itertools import groupby
from operator import attrgetter
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from shoop.core import taxing
from shoop.core.taxing.utils import calculate_compounded_added_taxes
from shoop.default_tax.models import TaxRule
from shoop.utils.iterables import first
class DefaultTaxModule(taxing.TaxModule):
identifier = "default_tax"
name = _("Default Taxation")
def get_taxed_price_for(self, context, item, price):
return _calculate_taxes(price, context, item.tax_class)
def _calculate_taxes(price, taxing_context, tax_class):
rules = _get_enabled_tax_rules(taxing_context, tax_class)
tax_groups = get_taxes_of_effective_rules(taxing_context, rules)
return calculate_compounded_added_taxes(price, tax_groups)
def _get_enabled_tax_rules(taxing_context, tax_class):
"""
Get enabled tax rules from the db for given parameters.
    Returned rules are ordered descending by override group and then
    ascending by priority (as required by `get_taxes_of_effective_rules`).
:type taxing_context: shoop.core.taxing.TaxingContext
:type tax_class: shoop.core.models.TaxClass
"""
tax_rules = TaxRule.objects.filter(enabled=True, tax_classes=tax_class)
if taxing_context.customer_tax_group:
tax_rules = tax_rules.filter(
Q(customer_tax_groups=taxing_context.customer_tax_group) |
Q(customer_tax_groups=None))
tax_rules = tax_rules.order_by('-override_group', 'priority')
return tax_rules
def get_taxes_of_effective_rules(taxing_context, tax_rules):
"""
Get taxes grouped by priority from effective tax rules.
Effective tax rules is determined by first limiting the scope to the
rules that match the given taxing context (see `TaxRule.match`) and
then further limiting the matching rules by selecting only the rules
in the highest numbered override group.
The `Tax` objects in the effective rules will be grouped by the
priority of the rules. The tax groups are returned as list of tax
lists.
:type taxing_context: shoop.core.taxing.TaxingContext
:param tax_rules:
        Tax rules to filter from. These should be ordered descending by
override group and then ascending by priority.
:type tax_rules: Iterable[TaxRule]
:rtype: list[list[shoop.core.models.Tax]]
"""
# Limit our scope to only matching rules
matching_rules = (
tax_rule for tax_rule in tax_rules
if tax_rule.matches(taxing_context))
# Further limit our scope to the highest numbered override group
grouped_by_override = groupby(matching_rules, attrgetter('override_group'))
highest_override_group = first(grouped_by_override, (None, []))[1]
# Group rules by priority
grouped_rules = groupby(highest_override_group, attrgetter('priority'))
tax_groups = [
[rule.tax for rule in rules]
for (_, rules) in grouped_rules]
return tax_groups
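# Hedged, illustrative sketch (not part of the original module): stub rules
# showing that only the highest override group survives and that its rules
# are grouped into tax lists by priority.
if __name__ == '__main__':
    class _StubRule(object):
        def __init__(self, override_group, priority, tax):
            self.override_group = override_group
            self.priority = priority
            self.tax = tax
        def matches(self, taxing_context):  # accept any context
            return True
    _rules = [  # already ordered by (-override_group, priority)
        _StubRule(2, 1, 'VAT'), _StubRule(2, 1, 'levy'),
        _StubRule(2, 2, 'surtax'), _StubRule(1, 1, 'old'),
    ]
    assert get_taxes_of_effective_rules(None, _rules) == [['VAT', 'levy'], ['surtax']]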
| jorge-marques/shoop | shoop/default_tax/module.py | Python | agpl-3.0 | 3,217 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('author_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50)),
('author', models.ForeignKey('author_app.Author')),
],
),
]
| BrotherPhil/django | tests/migrations/migrations_test_apps/alter_fk/book_app/migrations/0001_initial.py | Python | bsd-3-clause | 569 |
"""
(c) 2013 Rachel Sanders. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import wraps
import logging
from flask import abort, current_app, url_for
from flask import redirect as _redirect
from flask.signals import Namespace
__version__ = '0.7-dev'
log = logging.getLogger(u'flask-featureflags')
RAISE_ERROR_ON_MISSING_FEATURES = u'RAISE_ERROR_ON_MISSING_FEATURES'
FEATURE_FLAGS_CONFIG = u'FEATURE_FLAGS'
EXTENSION_NAME = "FeatureFlags"
class StopCheckingFeatureFlags(Exception):
""" Raise this inside of a feature flag handler to immediately return False and stop any further handers from running """
pass
class NoFeatureFlagFound(Exception):
""" Raise this when the feature flag does not exist. """
pass
_ns = Namespace()
missing_feature = _ns.signal('missing-feature')
def AppConfigFlagHandler(feature=None):
""" This is the default handler. It checks for feature flags in the current app's configuration.
For example, to have 'unfinished_feature' hidden in production but active in development:
config.py
class ProductionConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : False,
}
class DevelopmentConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : True,
}
"""
if not current_app:
log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
return False
try:
return current_app.config[FEATURE_FLAGS_CONFIG][feature]
except (AttributeError, KeyError):
raise NoFeatureFlagFound()
class FeatureFlag(object):
JINJA_TEST_NAME = u'active_feature'
def __init__(self, app=None):
if app is not None:
self.init_app(app)
# The default out-of-the-box handler looks up features in Flask's app config.
self.handlers = [AppConfigFlagHandler]
def init_app(self, app):
""" Add ourselves into the app config and setup, and add a jinja function test """
app.config.setdefault(FEATURE_FLAGS_CONFIG, {})
app.config.setdefault(RAISE_ERROR_ON_MISSING_FEATURES, False)
if hasattr(app, "add_template_test"):
# flask 0.10 and higher has a proper hook
app.add_template_test(self.check, name=self.JINJA_TEST_NAME)
else:
app.jinja_env.tests[self.JINJA_TEST_NAME] = self.check
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions[EXTENSION_NAME] = self
def clear_handlers(self):
""" Clear all handlers. This effectively turns every feature off."""
self.handlers = []
def add_handler(self, function):
""" Add a new handler to the end of the chain of handlers. """
self.handlers.append(function)
def remove_handler(self, function):
""" Remove a handler from the chain of handlers. """
try:
self.handlers.remove(function)
except ValueError: # handler wasn't in the list, pretend we don't notice
pass
def check(self, feature):
""" Loop through all our feature flag checkers and return true if any of them are true.
The order of handlers matters - we will immediately return True if any handler returns true.
    If you want a handler to return False and stop the chain, raise the StopCheckingFeatureFlags exception."""
found = False
for handler in self.handlers:
try:
if handler(feature):
return True
except StopCheckingFeatureFlags:
return False
except NoFeatureFlagFound:
pass
else:
found = True
if not found:
message = u"No feature flag defined for {feature}".format(feature=feature)
if current_app.debug and current_app.config.get(RAISE_ERROR_ON_MISSING_FEATURES, False):
raise KeyError(message)
else:
log.info(message)
missing_feature.send(self, feature=feature)
return False
def is_active(feature):
""" Check if a feature is active """
if current_app:
feature_flagger = current_app.extensions.get(EXTENSION_NAME)
if feature_flagger:
return feature_flagger.check(feature)
else:
raise AssertionError("Oops. This application doesn't have the Flask-FeatureFlag extention installed.")
else:
log.warn(u"Got a request to check for {feature} but we're running outside the request context. Check your setup. Returning False".format(feature=feature))
return False
def is_active_feature(feature, redirect_to=None, redirect=None):
"""
Decorator for Flask views. If a feature is off, it can either return a 404 or redirect to a URL if you'd rather.
"""
def _is_active_feature(func):
@wraps(func)
def wrapped(*args, **kwargs):
if not is_active(feature):
url = redirect_to
if redirect:
url = url_for(redirect)
if url:
log.debug(u'Feature {feature} is off, redirecting to {url}'.format(feature=feature, url=url))
return _redirect(url, code=302)
else:
log.debug(u'Feature {feature} is off, aborting request'.format(feature=feature))
abort(404)
return func(*args, **kwargs)
return wrapped
return _is_active_feature
# Silence that annoying No handlers could be found for logger "flask-featureflags"
class NullHandler(logging.Handler):
def emit(self, record):
pass
log.addHandler(NullHandler())
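# Hedged usage sketch (names and routes are illustrative, not part of this
# module): wire the extension into an app, guard a view, and register a
# custom handler that consults environment variables.
#
#   from flask import Flask
#   import flask_featureflags as feature
#
#   app = Flask(__name__)
#   app.config[FEATURE_FLAGS_CONFIG] = {'beta_search': True}
#   feature_flags = feature.FeatureFlag(app)
#
#   @app.route('/search')
#   @feature.is_active_feature('beta_search', redirect_to='/')
#   def search():
#       return 'beta search'
#
#   def env_handler(name):  # may raise NoFeatureFlagFound to defer to others
#       import os
#       return os.environ.get('FLAG_' + name.upper()) == '1'
#
#   feature_flags.add_handler(env_handler)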
| iromli/Flask-FeatureFlags | flask_featureflags/__init__.py | Python | apache-2.0 | 5,814 |
import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
from autocause.converters import NUMERICAL_TO_NUMERICAL, NUMERICAL_TO_CATEGORICAL, BINARY_TO_NUMERICAL, BINARY_TO_CATEGORICAL, CATEGORICAL_TO_NUMERICAL, CATEGORICAL_TO_CATEGORICAL
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will cause the number of features to grow faster than linearly
"""
AGGREGATORS = [
to_aggregator("max"),
to_aggregator("min"),
to_aggregator("median"),
to_aggregator("mode"),
to_aggregator("mean"),
to_aggregator("sum"),
]
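# Hedged sketch of the "Sample use" described above (numpy-only; assumes
# each aggregator reduces a 1-D array of per-column values to a scalar,
# which is what the names suggest):
#
#   import numpy as np
#   cat = np.array([0, 1, 2, 1])
#   onehot = (cat[:, None] == np.arange(3)).astype(float)  # steps 1-2
#   per_column = onehot.std(axis=0)  # step 3: one numerical value per column
#   combined = per_column.max()      # step 4: one aggregator -> one feature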
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = True
CONVERT_TO_CATEGORICAL = True
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
    lambda x: len(set(x)), # number of unique values
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function.
Converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=NUMERICAL_TO_NUMERICAL["identity"],
B=BINARY_TO_NUMERICAL["identity"],
C=CATEGORICAL_TO_NUMERICAL["binarize"],
)
CATEGORICAL_CONVERTERS = dict(
N=NUMERICAL_TO_CATEGORICAL["discretizer10"],
B=BINARY_TO_CATEGORICAL["identity"],
C=CATEGORICAL_TO_CATEGORICAL["identity"],
)
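# Hedged example of the converter signature documented above (illustrative,
# not one of the provided converters): a numerical converter that
# standardizes X and ignores the Y side. A dictionary entry would pair it
# with the aggregation flag, e.g. N=(standardize, False).
#
#   def standardize(X_raw, X_current_type, Y_raw, Y_type):
#       return (X_raw - X_raw.mean()) / (X_raw.std() + 1e-8)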
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converters can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = True
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
Ridge(),
LinearRegression(),
DecisionTreeRegressor(random_state=0),
RandomForestRegressor(random_state=0),
GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
LogisticRegression(random_state=0),
DecisionTreeClassifier(random_state=0),
RandomForestClassifier(random_state=0),
GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsClassifier(),
GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
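# Hedged sketch of the reflection described above: each observation (A, B)
# is appended again as (B, A), and the training labels are appended in the
# same order (negated here, on the assumption that cause-effect labels flip
# sign under reflection).
#
#   pairs_out = pairs + [(b, a) for (a, b) in pairs]
#   labels_out = np.concatenate([labels, -labels])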
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
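# Hedged sketch of the combination features described above: for F features
# and M metafeatures this appends F*M product columns (hence the warning).
#
#   combos = (features[:, :, None] * metafeatures[:, None, :]).reshape(len(features), -1)
#   output = np.hstack([features, metafeatures, combos])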
| diogo149/autocause | autocause/autocause_settings.py | Python | mit | 7,414 |
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from oslo.config import cfg
SQLALCHEMY_OPTIONS = (
cfg.StrOpt('uri', default='sqlite:///:memory:',
help='An sqlalchemy URL'),
)
SQLALCHEMY_GROUP = 'drivers:storage:sqlalchemy'
def _config_options():
return [(SQLALCHEMY_GROUP, SQLALCHEMY_OPTIONS)]
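# Hedged usage sketch (not part of the original module): how a driver would
# typically register these options on an oslo.config ConfigOpts object.
#
#   conf = cfg.ConfigOpts()
#   for group, options in _config_options():
#       conf.register_opts(options, group=group)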
| rackerlabs/marconi | marconi/queues/storage/sqlalchemy/options.py | Python | apache-2.0 | 856 |
class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
do_subsets(nums, 0, [], result)
return result
def do_subsets(nums, i, bk, result):
if i == len(nums):
result.append(bk)
return
nbk = bk[:]
nbk.append(nums[i])
    # NOTE: if we want to optimize memory further, we can save the length of
    # bk before taking branch 1, then restore bk to that length, append the
    # current element, and run branch 2 (see the sketch after this function).
# the branch that doesn't contain the current element
do_subsets(nums, i + 1, bk, result)
# the branch that contains the current element
do_subsets(nums, i + 1, nbk, result)
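# A sketch of the optimization described in the NOTE above (added for
# illustration, not part of the original solution): reuse one backtracking
# list and copy it only when a complete subset is recorded.
def do_subsets_backtrack(nums, i, bk, result):
    if i == len(nums):
        result.append(bk[:])  # copy only at the leaves
        return
    # branch 1: skip nums[i]
    do_subsets_backtrack(nums, i + 1, bk, result)
    # branch 2: include nums[i], then undo the append
    bk.append(nums[i])
    do_subsets_backtrack(nums, i + 1, bk, result)
    bk.pop()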
| kingsamchen/Eureka | crack-data-structures-and-algorithms/leetcode/subsets_q78.py | Python | mit | 750 |
from django.conf import settings
from django.utils import translation
from geotrek.tourism import models as tourism_models
from geotrek.tourism.views import TouristicContentViewSet, TouristicEventViewSet
from geotrek.trekking.management.commands.sync_rando import Command as BaseCommand
# Register mapentity models
from geotrek.tourism import urls # NOQA
class Command(BaseCommand):
def sync_content(self, lang, content):
self.sync_pdf(lang, content)
for picture, resized in content.resized_pictures:
self.sync_media_file(lang, resized)
def sync_event(self, lang, event):
self.sync_pdf(lang, event)
for picture, resized in event.resized_pictures:
self.sync_media_file(lang, resized)
def sync_tourism(self, lang):
self.sync_geojson(lang, TouristicContentViewSet, 'touristiccontents')
self.sync_geojson(lang, TouristicEventViewSet, 'touristicevents')
contents = tourism_models.TouristicContent.objects.existing().order_by('pk')
contents = contents.filter(**{'published_{lang}'.format(lang=lang): True})
for content in contents:
self.sync_content(lang, content)
events = tourism_models.TouristicEvent.objects.existing().order_by('pk')
events = events.filter(**{'published_{lang}'.format(lang=lang): True})
for event in events:
self.sync_event(lang, event)
def sync(self):
super(Command, self).sync()
self.sync_static_file('**', 'tourism/touristicevent.svg')
self.sync_pictograms('**', tourism_models.InformationDeskType)
self.sync_pictograms('**', tourism_models.TouristicContentCategory)
self.sync_pictograms('**', tourism_models.TouristicContentType)
self.sync_pictograms('**', tourism_models.TouristicEventType)
for lang in settings.MODELTRANSLATION_LANGUAGES:
translation.activate(lang)
self.sync_tourism(lang)
| johan--/Geotrek | geotrek/tourism/management/commands/sync_rando.py | Python | bsd-2-clause | 1,966 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# KryPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 21 17:54:43 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# According to hint from
# <http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/>.
import mock
MOCK_MODULES = ['scipy',
'scipy.linalg', 'scipy.linalg.blas',
'scipy.sparse', 'scipy.sparse.sputils']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'KryPy'
copyright = u'2013—2014, André Gaul'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from krypy import __version__
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KryPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
'papersize': 'a4paper',
'preamble': r'''
\usepackage{amsmath}
\usepackage{amssymb}
'''
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'KryPy.tex', 'KryPy Documentation',
'André Gaul', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'krypy', 'KryPy Documentation',
['André Gaul'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KryPy', 'KryPy Documentation',
'André Gaul', 'KryPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| highlando/krypy | docs/conf.py | Python | mit | 8,372 |
from .Child import Child
from .Node import Node # noqa: I201
EXPR_NODES = [
# An inout expression.
# &x
Node('InOutExpr', kind='Expr',
children=[
Child('Ampersand', kind='PrefixAmpersandToken'),
Child('Expression', kind='Expr'),
]),
# A #column expression.
Node('PoundColumnExpr', kind='Expr',
children=[
Child('PoundColumn', kind='PoundColumnToken'),
]),
Node('TupleExprElementList', kind='SyntaxCollection',
element='TupleExprElement'),
Node('ArrayElementList', kind='SyntaxCollection',
element='ArrayElement'),
Node('DictionaryElementList', kind='SyntaxCollection',
element='DictionaryElement'),
Node('StringLiteralSegments', kind='SyntaxCollection',
element='Syntax', element_name='Segment',
element_choices=['StringSegment', 'ExpressionSegment']),
# The try operator.
# try foo()
# try? foo()
# try! foo()
Node('TryExpr', kind='Expr',
children=[
Child('TryKeyword', kind='TryToken'),
Child('QuestionOrExclamationMark', kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child('Expression', kind='Expr'),
]),
# The await operator.
# await foo()
Node('AwaitExpr', kind='Expr',
children=[
Child('AwaitKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['await']),
Child('Expression', kind='Expr'),
]),
# declname-arguments -> '(' declname-argument-list ')'
# declname-argument-list -> declname-argument*
# declname-argument -> identifier ':'
Node('DeclNameArgument', kind='Syntax',
children=[
Child('Name', kind='Token'),
Child('Colon', kind='ColonToken'),
]),
Node('DeclNameArgumentList', kind='SyntaxCollection',
element='DeclNameArgument'),
Node('DeclNameArguments', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='DeclNameArgumentList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
]),
# An identifier expression.
Node('IdentifierExpr', kind='Expr',
children=[
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'SelfToken',
'CapitalSelfToken',
'DollarIdentifierToken',
'SpacedBinaryOperatorToken',
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
    # A 'super' expression.
Node('SuperRefExpr', kind='Expr',
children=[
Child('SuperKeyword', kind='SuperToken'),
]),
# A nil expression.
Node('NilLiteralExpr', kind='Expr',
children=[
Child('NilKeyword', kind='NilToken'),
]),
# A _ expression.
Node('DiscardAssignmentExpr', kind='Expr',
children=[
Child('Wildcard', kind='WildcardToken'),
]),
# An = expression.
Node('AssignmentExpr', kind='Expr',
children=[
Child('AssignToken', kind='EqualToken'),
]),
# A flat list of expressions before sequence folding, e.g. 1 + 2 + 3.
Node('SequenceExpr', kind='Expr',
children=[
Child('Elements', kind='ExprList',
collection_element_name='Element'),
]),
Node('ExprList', kind='SyntaxCollection',
element='Expr',
element_name='Expression',
description='''
A list of expressions connected by operators. This list is contained
by a `SequenceExprSyntax`.
'''),
# A #line expression.
Node('PoundLineExpr', kind='Expr',
children=[
Child('PoundLine', kind='PoundLineToken'),
]),
# A #file expression.
Node('PoundFileExpr', kind='Expr',
children=[
Child('PoundFile', kind='PoundFileToken'),
]),
# A #fileID expression.
Node('PoundFileIDExpr', kind='Expr',
children=[
Child('PoundFileID', kind='PoundFileIDToken'),
]),
# A #filePath expression.
Node('PoundFilePathExpr', kind='Expr',
children=[
Child('PoundFilePath', kind='PoundFilePathToken'),
]),
# A #function expression.
Node('PoundFunctionExpr', kind='Expr',
children=[
Child('PoundFunction', kind='PoundFunctionToken'),
]),
# A #dsohandle expression.
Node('PoundDsohandleExpr', kind='Expr',
children=[
Child('PoundDsohandle', kind='PoundDsohandleToken'),
]),
# symbolic-reference-expression -> identifier generic-argument-clause?
Node('SymbolicReferenceExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# A prefix operator expression.
# -x
# !true
Node('PrefixOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='PrefixOperatorToken',
is_optional=True),
Child('PostfixExpression', kind='Expr'),
]),
# An operator like + or -.
# NOTE: This appears only in SequenceExpr.
Node('BinaryOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='BinaryOperatorToken'),
]),
# arrow-expr -> 'async'? 'throws'? '->'
# NOTE: This appears only in SequenceExpr.
Node('ArrowExpr', kind='Expr',
children=[
Child('AsyncKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['async'], is_optional=True),
Child('ThrowsToken', kind='ThrowsToken',
is_optional=True),
Child('ArrowToken', kind='ArrowToken'),
]),
# A floating-point literal
# 4.0
# -3.9
# +4e20
Node('FloatLiteralExpr', kind='Expr',
children=[
Child('FloatingDigits', kind='FloatingLiteralToken'),
],
must_uphold_invariant=True),
Node('TupleExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ElementList', kind='TupleExprElementList',
collection_element_name='Element'),
Child('RightParen', kind='RightParenToken'),
]),
# Array literal, e.g. [1, 2, 3]
Node('ArrayExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Elements', kind='ArrayElementList',
collection_element_name='Element'),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# Dictionary literal, e.g. [1:1, 2:2, 3:3]
Node('DictionaryExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Content', kind='Syntax',
node_choices=[
Child('Colon', kind='ColonToken'),
Child('Elements', kind='DictionaryElementList'),
]),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# An element inside a tuple element list
Node('TupleExprElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='Token',
is_optional=True,
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# element inside an array expression: expression ','?
Node('ArrayElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
    # element inside a dictionary expression: key ':' value ','?
Node('DictionaryElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('KeyExpression', kind='Expr'),
Child('Colon', kind='ColonToken'),
Child('ValueExpression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# An integer literal.
# 3
# +3_400
# +0x4f
Node('IntegerLiteralExpr', kind='Expr',
children=[
Child('Digits', kind='IntegerLiteralToken'),
],
must_uphold_invariant=True),
# true or false
Node('BooleanLiteralExpr', kind='Expr',
children=[
Child("BooleanLiteral", kind='Token',
token_choices=[
'TrueToken',
'FalseToken',
])
]),
# a ? 1 : 0
Node('TernaryExpr', kind='Expr',
children=[
Child("ConditionExpression", kind='Expr'),
Child("QuestionMark", kind='InfixQuestionMarkToken'),
Child("FirstChoice", kind='Expr'),
Child("ColonMark", kind='ColonToken'),
Child("SecondChoice", kind='Expr')
]),
# expr?.name
Node('MemberAccessExpr', kind='Expr',
children=[
# The base needs to be optional to parse expressions in key paths
# like \.a
Child("Base", kind='Expr', is_optional=True),
Child("Dot", kind='Token',
token_choices=[
'PeriodToken', 'PrefixPeriodToken'
]),
# Name could be 'self'
Child("Name", kind='Token'),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# is TypeName
Node('IsExpr', kind='Expr',
children=[
Child("IsTok", kind='IsToken'),
Child("TypeName", kind='Type')
]),
# as TypeName
Node('AsExpr', kind='Expr',
children=[
Child("AsTok", kind='AsToken'),
Child("QuestionOrExclamationMark", kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child("TypeName", kind='Type')
]),
# Type
Node('TypeExpr', kind='Expr',
children=[
Child('Type', kind='Type'),
]),
Node('ClosureCaptureItem', kind='Syntax',
traits=['WithTrailingComma'],
children=[
# FIXME: Add a 'CaptureSpecifier' node kind for `Specifier`.
Child("Specifier", kind='TokenList',
collection_element_name='SpecifierToken', is_optional=True),
Child("Name", kind='IdentifierToken', is_optional=True),
Child('AssignToken', kind='EqualToken', is_optional=True),
Child("Expression", kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('ClosureCaptureItemList', kind='SyntaxCollection',
element='ClosureCaptureItem'),
Node('ClosureCaptureSignature', kind='Syntax',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Items', kind='ClosureCaptureItemList',
collection_element_name='Item', is_optional=True),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
Node('ClosureParam', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Name', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# a, b, c
Node('ClosureParamList', kind='SyntaxCollection', element='ClosureParam'),
Node('ClosureSignature', kind='Syntax',
children=[
Child('Capture', kind='ClosureCaptureSignature',
is_optional=True),
Child('Input', kind='Syntax', is_optional=True,
node_choices=[
Child('SimpleInput', kind='ClosureParamList'),
Child('Input', kind='ParameterClause'),
]),
Child('AsyncKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['async'], is_optional=True),
Child('ThrowsTok', kind='ThrowsToken', is_optional=True),
Child('Output', kind='ReturnClause', is_optional=True),
Child('InTok', kind='InToken'),
]),
Node('ClosureExpr', kind='Expr',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Signature', kind='ClosureSignature', is_optional=True),
Child('Statements', kind='CodeBlockItemList',
collection_element_name='Statement'),
Child('RightBrace', kind='RightBraceToken'),
]),
# unresolved-pattern-expr -> pattern
Node('UnresolvedPatternExpr', kind='Expr',
children=[
Child('Pattern', kind='Pattern'),
]),
# trailing-closure-element -> identifier ':' closure-expression
Node('MultipleTrailingClosureElement', kind='Syntax',
children=[
Child('Label', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('Colon', kind='ColonToken'),
Child('Closure', kind='ClosureExpr'),
]),
Node('MultipleTrailingClosureElementList', kind='SyntaxCollection',
element='MultipleTrailingClosureElement'),
# call-expr -> expr '(' call-argument-list ')' closure-expr?
# | expr closure-expr
Node('FunctionCallExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken',
is_optional=True),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
Child('AdditionalTrailingClosures',
kind='MultipleTrailingClosureElementList',
collection_element_name='AdditionalTrailingClosure',
is_optional=True),
]),
# subscript-expr -> expr '[' call-argument-list ']' closure-expr?
Node('SubscriptExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftBracket', kind='LeftSquareBracketToken'),
Child('ArgumentList', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightBracket', kind='RightSquareBracketToken'),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
Child('AdditionalTrailingClosures',
kind='MultipleTrailingClosureElementList',
collection_element_name='AdditionalTrailingClosure',
is_optional=True),
]),
# optional-chaining-expr -> expr '?'
Node('OptionalChainingExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
# forced-value-expr -> expr '!'
Node('ForcedValueExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('ExclamationMark', kind='ExclamationMarkToken'),
]),
# postfix-unary-expr -> expr postfix-operator
Node('PostfixUnaryExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('OperatorToken', kind='PostfixOperatorToken'),
]),
# specialize-expr -> expr generic-argument-clause?
Node('SpecializeExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('GenericArgumentClause', kind='GenericArgumentClause'),
]),
# string literal segment in a string interpolation expression.
Node('StringSegment', kind='Syntax',
children=[
Child('Content', kind='StringSegmentToken'),
]),
# expression segment in a string interpolation expression.
Node('ExpressionSegment', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('Backslash', kind='BackslashToken'),
Child('Delimiter', kind='RawStringDelimiterToken',
is_optional=True),
Child('LeftParen', kind='LeftParenToken',
classification='StringInterpolationAnchor',
force_classification=True),
Child('Expressions', kind='TupleExprElementList',
collection_element_name='Expression'),
Child('RightParen', kind='StringInterpolationAnchorToken'),
]),
# e.g. "abc \(foo()) def"
Node('StringLiteralExpr', kind='Expr',
children=[
Child('OpenDelimiter', kind='RawStringDelimiterToken',
is_optional=True),
Child('OpenQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('Segments', kind='StringLiteralSegments',
collection_element_name='Segment'),
Child('CloseQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('CloseDelimiter', kind='RawStringDelimiterToken',
is_optional=True),
]),
# e.g. "\a.b[2].a"
Node('KeyPathExpr', kind='Expr',
children=[
Child('Backslash', kind='BackslashToken'),
Child('RootExpr', kind='Expr', is_optional=True,
node_choices=[
Child('IdentifierExpr', kind='IdentifierExpr'),
Child('SpecializeExpr', kind='SpecializeExpr')
]),
Child('Expression', kind='Expr'),
]),
# The period in the key path serves as the base on which the
# right-hand-side of the key path is evaluated
Node('KeyPathBaseExpr', kind='Expr',
children=[
Child('Period', kind='PeriodToken'),
]),
# e.g. "a." or "a"
Node('ObjcNamePiece', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('Dot', kind='PeriodToken', is_optional=True),
]),
# e.g. "a.b.c"
Node('ObjcName', kind='SyntaxCollection', element='ObjcNamePiece'),
# e.g. "#keyPath(a.b.c)"
Node('ObjcKeyPathExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('KeyPath', kind='PoundKeyPathToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='ObjcName',
collection_element_name='NamePiece'),
Child('RightParen', kind='RightParenToken'),
]),
# e.g. "#selector(getter:Foo.bar)"
Node('ObjcSelectorExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('PoundSelector', kind='PoundSelectorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Kind', kind='ContextualKeywordToken',
text_choices=['getter', 'setter'],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Name', kind='Expr'),
Child('RightParen', kind='RightParenToken'),
]),
# <#content#>
Node('EditorPlaceholderExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
]),
# #fileLiteral(a, b, c)
Node('ObjectLiteralExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('Identifier', kind='Token',
token_choices=[
'PoundColorLiteralToken',
'PoundFileLiteralToken',
'PoundImageLiteralToken',
]),
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
]),
]
| nathawes/swift | utils/gyb_syntax_support/ExprNodes.py | Python | apache-2.0 | 21,870 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cgi
import urllib
import webapp2
from google.appengine.ext import ndb
from google.appengine.api import users
import jinja2
import os
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class Greeting(ndb.Model):
"""Models an individual guestbook entry with author, content, and date."""
author = ndb.UserProperty()
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_book(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).order(-cls.date)
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.out.write('<html><body>')
#self.response.out.write(self.request.environ)
guestbook_name = self.request.get('guestbook_name')
# There is no need to actually create the parent Book entity; we can
# set it to be the parent of another entity without explicitly creating it
ancestor_key = ndb.Key("Book", guestbook_name or "*notitle*")
greetings = Greeting.query_book(ancestor_key).fetch(20)
if users.get_current_user():
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'greetings': greetings,
'url': url,
'url_linktext': url_linktext,
'guestbook_name': guestbook_name
}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Guestbook(webapp2.RequestHandler):
def post(self):
user = users.get_current_user()
if user:
content = self.request.get('content')
else:
content = 'From ' + self.request.environ['REMOTE_ADDR'] + ' ' + self.request.get('content')
# Set parent key on each greeting to ensure that each
# guestbook's greetings are in the same entity group.
guestbook_name = self.request.get('guestbook_name')
# There is no need to actually create the parent Book entity; we can
# set it to be the parent of another entity without explicitly creating it
greeting = Greeting(parent=ndb.Key("Book", guestbook_name or "*notitle*"),
content = content)
if user:
greeting.author = user
greeting.put()
self.redirect('/?' + urllib.urlencode({'guestbook_name': guestbook_name}))
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', Guestbook)
])
| pynewb/appengine | appengine101/main.py | Python | mit | 3,084 |
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_user_role
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create or destroy user roles
options:
state:
description:
- Whether the specified user should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the role to manage.
required: true
command_directory_name:
description:
- The command or command directory to which the role has an access.
required: true
access_level:
description:
- The name of the role to manage.
choices: ['none', 'readonly', 'all']
default: 'all'
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User Role
na_cdot_user_role:
state: present
name: ansibleRole
command_directory_name: DEFAULT
access_level: none
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUserRole(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
command_directory_name=dict(required=True, type='str'),
access_level=dict(required=False, type='str', default='all',
choices=['none', 'readonly', 'all']),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.command_directory_name = p['command_directory_name']
self.access_level = p['access_level']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_role(self):
"""
Checks if the role exists for specific command-directory-name.
:return:
True if role found
False if role is not found
:rtype: bool
"""
security_login_role_get_iter = netapp_utils.zapi.NaElement(
'security-login-role-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-info', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
security_login_role_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(
security_login_role_get_iter, enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
# Error 16031 denotes a role not being found.
if to_native(e.code) == "16031":
return False
else:
self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
return True
else:
return False
def create_role(self):
role_create = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-create', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name,
'access-level':
self.access_level})
try:
self.server.invoke_successfully(role_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_role(self):
role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-delete', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name})
try:
self.server.invoke_successfully(role_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
role_exists = self.get_role()
if role_exists:
if self.state == 'absent':
changed = True
# Check if properties need to be updated
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not role_exists:
self.create_role()
# Update properties
elif self.state == 'absent':
self.delete_role()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTUserRole()
v.apply()
if __name__ == '__main__':
main()
| Tatsh-ansible/ansible | lib/ansible/modules/storage/netapp/na_cdot_user_role.py | Python | gpl-3.0 | 6,983 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(
usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin",
action="append",
help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, sys, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
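# A minimal sketch (hypothetical, never called) of the string helpers above;
# it only illustrates the contract under the default utf8 encoding.
def _demo_string_helpers():
    raw = tob('hello')             # text -> bytes using the utf8 default
    text = touni(raw)              # bytes -> text
    assert text == 'hello' and touni(None) == ''
    return raw, text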
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self):
pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
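# A minimal sketch (hypothetical, not used by bottle) of DictProperty: the
# value is computed once, stored under storage['answer'] and reused afterwards.
class _DemoDictPropertyHolder(object):
    def __init__(self):
        self.storage = {}
    @DictProperty('storage', 'answer', read_only=True)
    def answer(self):
        return 42  # computed on first access, then served from the dict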
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
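# A minimal sketch (hypothetical, not used by bottle) of cached_property: the
# first access runs the function, then a plain attribute shadows the descriptor.
class _DemoCachedHolder(object):
    @cached_property
    def expensive(self):
        return sum(range(1000))  # runs once per instance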
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
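# A minimal sketch (hypothetical, never called) of what _re_flatten produces:
# named and anonymous groups turn non-capturing, escaped parens are untouched.
def _demo_re_flatten():
    flat = _re_flatten(r'/(?P<name>[^/]+)/(\d+)/\(x\)')
    assert flat == r'/(?:[^/]+)/(?:\d+)/\(x\)'
    return flat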
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
        and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" %
(rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)  # the method that would match, not the failing verb
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
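# A minimal sketch (hypothetical, never called) wiring the Router API above
# together: a custom filter, a dynamic rule, a match and a URL build.
def _demo_router():
    router = Router()
    router.add_filter('hex', lambda conf: (r'[a-f0-9]+',
                                           lambda x: int(x, 16),
                                           lambda x: '%x' % x))
    router.add('/item/<id:hex>', 'GET', target='item-handler', name='item')
    target, args = router.match({'REQUEST_METHOD': 'GET',
                                 'PATH_INFO': '/item/ff'})
    assert target == 'item-handler' and args == {'id': 255}
    assert router.build('item', id=255) == '/item/ff'
    return target, args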
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
        if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
            The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
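# A minimal sketch (hypothetical, never called) of the application API above:
# a before_request hook, a dynamic route and a custom 404 handler.
def _demo_bottle_app():
    app = Bottle()
    @app.hook('before_request')
    def _remember():
        request.environ['demo.seen'] = True  # runs before routing
    @app.route('/hello/<name>')
    def _hello(name):
        return 'Hello %s' % name
    @app.error(404)
    def _missing(err):
        return 'nothing here: %s' % err.status_line
    return app  # a WSGI callable; hand it to run() or any WSGI server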
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
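    # Illustration only: a chunked body such as
    #     b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'
    # is yielded by _iter_chunked as b'Wiki', b'pedia'; a malformed size line
    # or a missing CRLF after a chunk raises HTTPError(400).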
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'],
encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
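    # Illustration only: with SCRIPT_NAME='/a' and PATH_INFO='/b/c', calling
    # request.path_shift(1) yields SCRIPT_NAME='/a/b' and PATH_INFO='/c';
    # a negative shift moves segments back from script_name to path.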
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only works
            if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
    def __delitem__(self, key):
        self[key] = ""  # go through __setitem__ so dependent caches are cleared
        del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s' % name] = value
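# A minimal sketch (hypothetical, never called) of BaseRequest over a
# hand-built WSGI environ; only standard environ keys are used.
def _demo_base_request():
    env = {'REQUEST_METHOD': 'post',
           'PATH_INFO': '/submit',
           'QUERY_STRING': 'a=1&b=2'}
    req = BaseRequest(env)
    assert req.method == 'POST'   # normalized to upper case
    assert req.path == '/submit'
    assert req.query['a'] == '1'  # parsed lazily into a FormsDict
    return req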
def _hkey(s):
return s.title().replace('_', '-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', )),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript to read this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
# Attempt to serialize; raises an exception on failure
json_response = dumps(rv)
# Set the content type only if serialization succeeded
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
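# Illustrative sketch (editorial addition): MultiDict keeps every value,
# but plain item access returns only the newest one.
#
#     d = MultiDict(a=1)
#     d['a'] = 2        # __setitem__ appends instead of replacing
#     d['a']            # -> 2 (newest value)
#     d.getall('a')     # -> [1, 2]
#     d.replace('a', 3) # -> collapses the value list to [3]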
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
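# Illustrative sketch (editorial addition): FormsDict attribute access
# never raises and decodes values on the fly.
#
#     f = FormsDict()
#     f.append('city', 'Berlin')
#     f.city               # -> 'Berlin'
#     f.missing            # -> '' (missing attributes default to '')
#     f.getunicode('city') # -> 'Berlin'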
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, they are decoded or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that, it works just like the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
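# Illustrative sketch (editorial addition): the 'filter' meta field is
# applied by __setitem__ and casts values on assignment.
#
#     c = ConfigDict()
#     c.meta_set('workers', 'filter', int)
#     c['workers'] = '4'
#     c['workers']  # -> 4 (cast by the filter)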
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
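# Hedged usage sketch (editorial addition, assuming a multipart request
# bound to the global `request` object):
#
#     upload = request.files.get('data')
#     if upload:
#         # Directory destination: the sanitized .filename is appended.
#         upload.save('/tmp/uploads', overwrite=False)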
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype='auto',
download=False,
charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
if download and download != True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = request.environ.get('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
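# Example (editorial addition, using the classic RFC 2617 credentials):
#
#     parse_auth('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
#     # -> ('Aladdin', 'open sesame')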
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
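# Example (editorial addition): note the non-inclusive end index and the
# suffix-range form.
#
#     list(parse_range_header('bytes=0-99,-100', 1000))
#     # -> [(0, 100), (900, 1000)]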
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
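# Illustrative round trip (editorial addition):
#
#     token = cookie_encode(('name', 42), 'key')
#     cookie_is_encoded(token)       # -> True
#     cookie_decode(token, 'key')    # -> ('name', 42)
#     cookie_decode(token, 'other')  # -> None (signature mismatch)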
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
.replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
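# Examples (editorial addition):
#
#     path_shift('/a', '/b/c', shift=1)   # -> ('/a/b', '/c')
#     path_shift('/a/b', '/c', shift=-1)  # -> ('/a', '/b/c')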
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
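# Hedged usage sketch (editorial addition; `check` is a hypothetical
# callback that returns True for valid credentials):
#
#     def check(user, password):
#         return user == 'admin' and password == 's3cr3t'
#
#     @route('/admin')
#     @auth_basic(check)
#     def admin_page():
#         return 'Hello, admin!'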
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
self.port = self.srv.server_port # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self, handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AiohttpServer(ServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO': GeventSocketIOServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
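# Examples (editorial addition):
#
#     load('os.path')                   # -> the os.path module
#     load('os.path:basename')          # -> the basename function
#     load('os.path:basename("/a/b")')  # -> 'b'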
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
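# Hedged usage sketch (editorial addition): a typical development setup.
#
#     run(host='localhost', port=8080, reloader=True, debug=True)
#
# The reloader spawns a child process with BOTTLE_CHILD set and restarts
# it (child exit code 3) whenever a module file changes.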
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets to old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.',
True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are not supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
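# Illustrative sketch (editorial addition): inline expressions are
# HTML-escaped unless prefixed with '!'.
#
#     tpl = SimpleTemplate('Hello {{name}}!')
#     tpl.render(name='World')  # -> 'Hello World!'
#     tpl.render(name='<b>')    # -> 'Hello &lt;b&gt;!' (escaped)
#     SimpleTemplate('Hello {{!name}}!').render(name='<b>')  # unescaped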
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''((?mx) # verbose and multi-line mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
    Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
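# A minimal usage sketch for :func:`template` (not part of bottle itself; the
# template name 'hello_template' is a hypothetical file under ./views/):
def _template_usage_sketch():
    # An inline template string (detected via the '{{' in the first argument):
    s1 = template('Hello {{name}}!', name='World')
    # Template variables may also be passed as one or more dicts:
    s2 = template('Hello {{name}}!', {'name': 'World'})
    # A bare name is resolved against TEMPLATE_PATH (e.g. ./views/):
    s3 = template('hello_template', name='World')
    return s1, s2, s3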
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
        The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
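# A minimal usage sketch for the view decorator (not from the original source;
# the route path and template name are hypothetical):
def _view_usage_sketch(app):
    @app.route('/profile')
    @view('user_profile', title='Profile')
    def profile():
        # A dict return value fills the template; any other return value
        # (e.g. an HTTPResponse) is passed through unchanged.
        return {'name': 'alice'}
    return profile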
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0],
host=host,
port=int(port),
server=opt.server,
reloader=opt.reload,
plugins=opt.plugin,
debug=opt.debug)
# THE END
| hightoon/game_management | bottle.py | Python | mit | 148,406 |
from SubProviders.Subtitle import ISubtitleProvider
from SubProviders.Subtitle.ISubtitleProvider import SUBTITLE_PAGES, SUBTITLE_LANGUAGES
class SubtitleProvider(ISubtitleProvider.ISubtitleProvider):
PROVIDER_NAME = 'English - www.subtitle.co.il'
def __init__(self):
#Set the language
SUBTITLE_PAGES.LANGUAGE = SUBTITLE_LANGUAGES.ENGLISH
        #Call the ctor of ISubtitleProvider
super(SubtitleProvider, self).__init__()
| yosi-dediashvili/SubiT | src/SubProviders/Subtitle/eng_SubtitleProvider/SubtitleProvider.py | Python | gpl-3.0 | 484 |
# -*- coding: utf-8 -*-
from mock import patch
from unittest import TestCase
from datetime import date, timedelta
from django.http import Http404
from django.test import RequestFactory, override_settings
from fr_notices.navigation import make_preamble_nav
from regulations.generator.layers import diff_applier
from regulations.views import preamble
from regulations.views.preamble import CommentState
class PreambleViewTests(TestCase):
_mock_preamble = dict(text='1', label=['1'], node_type='', children=[
dict(text='2', label=['1', 'c'], node_type='', children=[
dict(text='3', label=['1', 'c', 'i'], node_type='', children=[]),
dict(text='4', label=['1', 'c', 'x'], node_type='', children=[])
]),
dict(text='5', label=['1', '1'], node_type='', children=[])
])
def test_find_subtree(self):
"""When a node is present in a tree, we should be able to find it.
When it is not, we should get None"""
root = self._mock_preamble
fn = preamble.find_subtree
self.assertEqual(fn(root, ['1'])['text'], '1')
self.assertEqual(fn(root, ['1', 'c'])['text'], '2')
self.assertEqual(fn(root, ['1', 'c', 'i'])['text'], '3')
self.assertEqual(fn(root, ['1', 'c', 'x'])['text'], '4')
self.assertEqual(fn(root, ['1', '1'])['text'], '5')
self.assertIsNone(fn(root, ['2']))
self.assertIsNone(fn(root, ['1', '2']))
self.assertIsNone(fn(root, ['1', 'c', 'r']))
self.assertIsNone(fn(root, ['1', 'c', 'i', 'r']))
@patch('fr_notices.navigation.CFRChangeBuilder')
@patch('regulations.generator.generator.api_reader')
@patch('regulations.views.preamble.ApiReader')
def test_get_integration(self, ApiReader, api_reader, CFRChangeBuilder):
"""Verify that the contexts are built correctly before being sent to
the template. AJAX/partial=true requests should only get the inner
context (i.e. no UI-related context)"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
api_reader.ApiReader.return_value.layer.return_value = {
'1-c-x': ['something']
}
view = preamble.PreambleView.as_view()
path = '/preamble/1/c/x?layers=meta'
response = view(RequestFactory().get(path), paragraphs='1/c/x')
self.assertEqual(
response.context_data['sub_context']['node']['text'], '4')
self.assertEqual(
response.context_data['sub_context']['node']['children'], [])
# layer data is present
self.assertEqual(
response.context_data['sub_context']['node']['meta'], 'something')
self.assertEqual(
response.context_data['preamble_toc'],
make_preamble_nav(self._mock_preamble['children']),
)
self.assertNotIn('node', response.context_data)
response = view(RequestFactory().get(path + '&partial=true'),
paragraphs='1/c/x')
self.assertIn('sub_context', response.context_data)
self.assertEqual(
response.context_data['sub_context']['node']['text'],
'4',
)
request = RequestFactory().get(
path, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = view(request, paragraphs='1/c/x')
self.assertIn('sub_context', response.context_data)
self.assertEqual(
response.context_data['sub_context']['node']['text'],
'4',
)
@override_settings(
PREAMBLE_INTRO={'1': {'meta': {
'publication_date': '2001-01-01',
'comments_close': (date.today() + timedelta(days=1)).isoformat()
}}})
@patch('regulations.views.preamble.ApiReader')
def test_comments_open_from_settings(self, ApiReader):
"""
Mock the PREAMBLE_INTRO data from settings for this test of the
comments being open.
"""
_, meta, _ = preamble.notice_data('1')
assert meta['comment_state'] == CommentState.OPEN
def _setup_mock_response(self, ApiReader, **kwargs):
"""Mock the ApiReader response, replacing meta data fields with
kwargs"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
notice = {
"action": "Proposed rule",
"agencies": ["Environmental Protection Agency"],
"cfr_title": 40,
"cfr_parts": ["300"],
"comments_close": "2011-09-09",
"dockets": ["EPA-HQ-SFUND-2010-1086",
"FRL-9925-69-OLEM"],
"primary_agency": "Environmental Protection Agency",
"title": ("Addition of a Subsurface Intrusion Component to the "
"Hazard Ranking System"),
"publication_date": "2011-02-02",
"regulatory_id_numbers": ["2050-AG67"],
}
notice.update(kwargs)
ApiReader.return_value.notice.return_value = notice
@patch('regulations.views.preamble.ApiReader')
def test_comments_open(self, ApiReader):
future = date.today() + timedelta(days=10)
self._setup_mock_response(ApiReader, comments_close=future.isoformat())
_, meta, _ = preamble.notice_data('1')
assert meta['comment_state'] == CommentState.OPEN
@patch('regulations.views.preamble.ApiReader')
def test_comments_prepub(self, ApiReader):
future = date.today() + timedelta(days=10)
self._setup_mock_response(ApiReader,
publication_date=future.isoformat())
_, meta, _ = preamble.notice_data('1')
assert meta['comment_state'] == CommentState.PREPUB
@patch('regulations.views.preamble.ApiReader')
def test_comments_closed(self, ApiReader):
self._setup_mock_response(ApiReader)
_, meta, _ = preamble.notice_data('1')
assert meta['comment_state'] == CommentState.CLOSED
@patch('fr_notices.navigation.CFRChangeBuilder')
@patch('regulations.generator.generator.api_reader')
@patch('regulations.views.preamble.ApiReader')
def test_get_top_level_redirect(self, ApiReader, api_reader,
CFRChangeBuilder):
ApiReader.return_value.preamble.return_value = self._mock_preamble
api_reader.ApiReader.return_value.layer.return_value = {
'1-c-x': ['something']
}
view = preamble.PreambleView.as_view()
path = '/preamble/1'
response = view(RequestFactory().get(path), paragraphs='1')
assert response.status_code == 302
assert response.get('Location') == '/preamble/1/c'
@patch('regulations.views.preamble.ApiReader')
def test_get_404(self, ApiReader):
"""When a requested doc is not present, we should return a 404"""
ApiReader.return_value.preamble.return_value = None
view = preamble.PreambleView.as_view()
self.assertRaises(Http404, view,
RequestFactory().get('/preamble/1/c/x'),
paragraphs='1/c/x')
@patch('regulations.views.preamble.ApiReader')
def test_get_subtree_404(self, ApiReader):
"""When a requested _subtree_ is not present, we should 404"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
view = preamble.PreambleView.as_view()
self.assertRaises(Http404, view,
RequestFactory().get('/preamble/1/not/here'),
paragraphs='1/not/here')
@patch('regulations.views.preamble.ApiReader')
def test_notice_data(self, ApiReader):
"""We should try to fetch data corresponding to both the Preamble and
the Notice"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
ApiReader.return_value.notice.return_value = {
'publication_date': '2002-02-02',
'comments_close': '2003-03-03',
'cfr_title': 21, 'cfr_parts': ['123']}
for doc_id in ('123_456', '123-456'):
preamble_, meta, notice = preamble.notice_data(doc_id)
self.assertEqual(preamble_, self._mock_preamble)
assert meta['comment_state'] == CommentState.CLOSED
self.assertEqual(meta['cfr_refs'],
[{'title': 21, 'parts': ['123']}])
self.assertEqual(ApiReader.return_value.preamble.call_args[0][0],
'123_456')
self.assertEqual(ApiReader.return_value.notice.call_args[0][0],
'123-456')
class CFRChangesViewTests(TestCase):
@patch('regulations.views.preamble.ApiReader')
@patch('regulations.views.preamble.generator')
def test_new_regtext_changes(self, generator, ApiReader):
"""We can add a whole new section without explosions"""
amendments = [{'instruction': '3. Add subpart M',
'changes': [
['111-Subpart-M', [{'node': {
'label': ['111', 'Subpart', 'M'],
'title': 'A New Subpart',
'child_labels': ['111-42', '111-43',
'111-44', '111-45']}}]],
['111-42', [{'some': 'thing'}]],
['111-43', [{'some': 'thing'}]],
['111-44', [{'some': 'thing'}]],
['111-45', [{'some': 'thing'}]]]},
{'instruction': '4. Unrelated'}]
version_info = {'111': {'left': '234-567', 'right': '8675-309'}}
# Section did not exist before
ApiReader.return_value.regulation.return_value = None
diff = {'111-44': {'op': 'added', 'node': {
'text': 'New node text', 'node_type': 'regtext',
'label': ['111', '44']}}}
generator.get_diff_applier.return_value = diff_applier.DiffApplier(
diff, '111-44')
generator.diff_layer_appliers.return_value = []
result = preamble.CFRChangesView.regtext_changes_context(
amendments, version_info, '111-44', '8675-309', 0)
self.assertEqual(result['instructions'], ['3. Add subpart M'])
self.assertEqual(result['tree']['marked_up'],
'<ins>New node text</ins>')
self.assertEqual(1, len(result['subparts']))
subpart_info = result['subparts'][0]
self.assertEqual('M', subpart_info.letter)
self.assertEqual('A New Subpart', subpart_info.title)
self.assertEqual(2, subpart_info.idx)
self.assertEqual(4, len(subpart_info.urls))
self.assertIn('111-42', subpart_info.urls[0])
self.assertIn('111-43', subpart_info.urls[1])
self.assertIn('111-44', subpart_info.urls[2])
self.assertIn('111-45', subpart_info.urls[3])
| 18F/regulations-site | regulations/tests/views_preamble_tests.py | Python | cc0-1.0 | 10,884 |
import os.path
import idb
def test_issue29():
"""
demonstrate GetManyBytes can retrieve the entire .text section
see github issue #29 for the backstory.
"""
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, "data", "issue29", "issue29.i64")
with idb.from_file(idbpath) as db:
api = idb.IDAPython(db)
seg = api.idc.FirstSeg()
while seg != api.idc.BADADDR:
name = api.idc.SegName(seg)
start = api.idc.SegStart(seg)
end = api.idc.SegEnd(seg)
if name == ".text":
# should not fail at address 0x180072200
textBytes = api.idc.GetManyBytes(start, end - start)
assert len(textBytes) == end - start
seg = api.idc.NextSeg(seg)
| williballenthin/python-idb | tests/test_issue29.py | Python | apache-2.0 | 820 |
import os
import numpy as np
import pydotplus as pydotplus
from orderedset import OrderedSet
from sklearn import tree
from collections import OrderedDict
from typing import List
from npf.build import Build
from npf.testie import Testie
from npf.types.dataset import Dataset
from npf import npf
class Statistics:
@staticmethod
def run(build: Build, all_results: Dataset, testie: Testie, max_depth=3, filename=None):
print("Building dataset...")
#Transform the dataset into a standard table of X/Features and Y observations
dataset = Statistics.buildDataset(all_results, testie)
        #There's one per series, so for each of those
for result_type, X, y, dtype in dataset:
if len(dataset) > 1:
print("Statistics for %s" % result_type)
print("Learning dataset built with %d samples and %d features..." % (X.shape[0], X.shape[1]))
clf = tree.DecisionTreeRegressor(max_depth=max_depth)
try:
clf = clf.fit(X, y)
except Exception as e:
print("Error while trying to fit the clf:")
print(e)
continue
if (max_depth is None and len(X) > 16) or (max_depth is not None and max_depth > 8):
print("No tree graph when maxdepth is > 8. Use --statistics-maxdepth 8 to fix it to 8.")
else:
dot_data = tree.export_graphviz(clf, out_file=None, filled=True, rounded=True, special_characters=True,
feature_names=dtype['names'])
graph = pydotplus.graph_from_dot_data(dot_data)
f = npf.build_filename(testie, build, filename if not filename is True else None, {}, 'pdf', result_type, show_serie=False, suffix="clf")
graph.write(f, format=os.path.splitext(f)[1][1:])
print("Decision tree visualization written to %s" % f)
vars_values = OrderedDict()
print("")
for i, column in enumerate(X.T):
varname = dtype['names'][i]
vars_values[varname] = set([v for v in np.unique(column)])
print("")
print("Feature importances :")
# noinspection PyUnresolvedReferences
l = list(zip(dtype['names'], clf.feature_importances_))
l.sort(key=lambda x: x[1])
for key, f in l:
if len(vars_values[key]) > 1:
print(" %s : %0.4f" % (key, f))
print('')
print("Better :")
best = X[y.argmax()]
print(" ", end='')
for i, name in enumerate(dtype['names']):
print("%s = %s, " % (name, best[i] if (dtype['values'][i] is None) else best[i] if type(best[i]) is np.str_ else dtype['values'][i][int(best[i])]), end='')
print(' : %.02f' % y.max())
print('')
print("Means and std/mean per variables :")
for i, (k, vals) in enumerate(vars_values.items()):
if len(vals) == 1:
continue
print("%s :" % k)
for v in sorted(vals):
vs = v if (dtype['values'][i] is None) else dtype['values'][i][int(v)]
tot = 0
n = 0
for ic in range(X.shape[0]):
if X[ic,i] == v:
tot += y[ic]
n += 1
if n == 0:
print(" %s : None" % vs)
else:
print(" %s : %.02f, " % (vs, tot / n))
print("")
@classmethod
def buildDataset(cls, all_results: Dataset, testie: Testie) -> List[tuple]:
#map of every <variable name, format>
dtype = testie.variables.dtype()
y = OrderedDict()
dataset = []
for i, (run, results_types) in enumerate(all_results.items()):
vars = list(run.variables[k] for k in dtype['names'])
if not results_types is None and len(results_types) > 0:
dataset.append([v for v in vars])
for result_type, results in results_types.items():
r = np.mean(results)
y.setdefault(result_type, []).append(r)
dtype['values'] = [None] * len(dtype['formats'])
for i, f in enumerate(dtype['formats']):
if f is str:
dtype['formats'][i] = int
values = OrderedSet()
for row in dataset:
values.add(row[i])
row[i] = values.index(row[i])
dtype['values'][i] = list(values)
X = np.array(dataset, ndmin=2)
lset = []
for result_type, v in y.items():
lset.append((result_type, X, np.array(v),dtype))
return lset
| tbarbette/npf | npf/statistics.py | Python | gpl-3.0 | 4,929 |
# -*- coding: utf-8 -*-
def command():
return "list-load-balancer"
def init_argument(parser):
parser.add_argument("--farm-no", required=True)
def execute(requester, args):
farm_no = args.farm_no
parameters = {}
parameters["FarmNo"] = farm_no
return requester.execute("/ListLoadBalancer", parameters)
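# A short usage sketch (not part of the original module; the `requester`
# object is a hypothetical stand-in for the pcc-cli API client):
def _example(requester):
    import argparse
    parser = argparse.ArgumentParser()
    init_argument(parser)
    args = parser.parse_args(['--farm-no', '3'])
    # Sends a /ListLoadBalancer request with FarmNo=3 through the requester
    return execute(requester, args)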
| primecloud-controller-org/pcc-cli | src/pcc/api/lb/list_load_balancer.py | Python | apache-2.0 | 329 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_new_emailfield_max_length'),
]
operations = [
migrations.AddField(
model_name='parliamentarysession',
name='position_title',
field=models.ForeignKey(blank=True, to='core.PositionTitle', null=True),
),
]
| mysociety/pombola | pombola/core/migrations/0004_parliamentarysession_position_title.py | Python | agpl-3.0 | 460 |
#!/usr/bin/env python
"""
@author: dn13([email protected])
@author: Fibrizof([email protected])
"""
import types
def _splitword( text, quotes ):
if text[0] in quotes :
s = text.find(text[0],1)
return text[0], text[1:s], text[s+1:]
else :
for i in range(len(text)):
if text[i].isalpha() == False :
return '', text[:i], text[i:]
return '', text, ''
def buildformattingstring( text, sign=('/*%','*/'), quotes=['"','\'','`'] ):
'''
/*%(Arg)s*/'ABC' => "'%(Arg)s'", {'Arg':'ABC'}
'''
text = text.split(sign[0])
head, body = text[0], text[1:]
body = [ b.split(sign[1],1) for b in body ]
for b in body:
if len(b) != 2 :
            raise ValueError, ( 'cannot find the end of sign', b[0] )
signcnt, body = zip(*body)
ns = [ s[1:].split(')',1)[0] if s[0] == '(' else None for s in signcnt ]
body = [ _splitword(b, quotes) for b in body ]
qs, ds, bs = zip(*body)
body = [ [ '%s%%%s%s' % (q,s,q), b.replace('%','%%') ]
for s, q, b in zip( signcnt, qs, bs )]
body = sum( body, [] )
text= [head.replace('%','%%'),]+body
text = ''.join(text)
    ns_n = [ n is None for n in ns ]
if all( ns_n ):
return text, ds
elif any( ns_n ):
        raise ValueError, 'Mapping keys and non-mapping keys mixed in text'
ds = dict(zip(ns,ds))
return text, ds
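# A short usage sketch for buildformattingstring, using the docstring's own
# example input:
def _buildformattingstring_example():
    text, values = buildformattingstring("/*%(Arg)s*/'ABC'")
    # text == "'%(Arg)s'" and values == {'Arg': 'ABC'}, so the original
    # quoted literal can be rebuilt with the % operator:
    return text % values # -> "'ABC'"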
class MapableTuple( tuple ):
def setkeys( self, names ):
self._keys = dict( [ (k, i) for i, k in enumerate( names )
if k != None ] )
def __getitem__( self, ind ):
if type(ind) in types.StringTypes :
ind = self._keys[ind]
return super( MapableTuple, self ).__getitem__( ind )
def get( self, ind, default ):
if type(ind) in types.StringTypes :
if ind in self._keys :
ind = self._keys[ind]
else :
return default
return super( MapableTuple, self ).__getitem__( ind )
def keys( self ):
return self._keys.keys()
def __contains__( self, key ):
return key in self._keys
#def __iter__( self ):
#
# return self._keys.__iter__()
def __len__( self ):
return len( self._keys )
def items( self ):
r = self._keys.items()
r.sort( key = lambda x : x[1] )
return [ (k, self[i]) for k, i in r]
def todict( self ):
return dict( self.items() )
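# A short usage sketch for MapableTuple: a tuple whose elements can also be
# looked up by name once setkeys() has been called.
def _mapabletuple_example():
    m = MapableTuple(('foo', 'd13', '1.0'))
    m.setkeys(['name', 'author', 'version'])
    return m['author'], m.get('license', None), m.items()
    # -> ('d13', None, [('name', 'foo'), ('author', 'd13'), ('version', '1.0')])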
class Table( object ):
def __init__( self, columns, table ):
self._c = columns
self.columns = dict([ ( c, i ) for i, c in enumerate(columns) ])
self.index = {}
self.values = table
return
def todict( self ):
return [ dict( zip(self._c, r) ) for r in self.values ]
def __getitem__( self, rows ):
'''
t[1]
t['Name':'Cover']
t[:,'Machine']
t[5:7,'Machine']
t['Name':'Cover',('Machine',)]
t[5:7,6:9]
'''
cols = slice(None,None)
if type(rows) in ( types.TupleType, types.ListType ) :
if len(rows) != 2 :
                raise TypeError, 'slice must be a tuple/list of length 2.'
rows, cols = rows
if type(rows) == types.SliceType :
if type( rows.start ) in types.StringTypes :
c = self.columns[rows.start]
rs = [ i for i, r in enumerate(self.values)
if r[c] == rows.stop ]
                if len(rs) > 1 :
                    raise IndexError, \
                            ( 'Multi Rows "%s"="%s" ' % (rows.start, rows.stop) ) \
                            + 'Line:' + ','.join([ str(i) for i in rs ])
                elif len(rs) == 0 :
                    raise IndexError, \
                            '"%s"="%s" Not Found' % (rows.start, rows.stop)
rows = rs[0]
if type(cols) in ( types.TupleType, types.ListType ) :
cols = [ c if type(c) == types.IntType else self.columns[c]
for c in cols ]
        elif type(cols) == types.SliceType :
            # slice attributes are read-only, so rebuild the slice with any
            # column names resolved to integer indexes
            start, stop = cols.start, cols.stop
            if type( start ) in types.StringTypes :
                start = self.columns[start]
            if type( stop ) in types.StringTypes :
                stop = self.columns[stop]
            cols = slice( start, stop, cols.step )
elif type(cols) in types.StringTypes :
cols = self.columns[cols]
if type(cols) in ( types.IntType, types.SliceType ) :
y = lambda r : r[cols]
else :
y = lambda r : [ r[c] for c in cols ]
if type(rows) == types.IntType:
x = y(self.values[rows])
else :
x = [ y(r) for r in self.values[rows] ]
return x
def __repr__( self ):
return '(' + ', '.join( [ '('+', '.join([ repr(c) for c in r ])+')'
for r in self.values
] ) + ')'
class EasyDocError( Exception ):
"""
Error of EasyDocstriong Modules
"""
pass
class EasyDoc( object ):
def __init__( self, sep=1, title='##!#' ):
self.sep = sep
self.title = title
def parse( self, doc, parser = None, *args, **kwargs ):
if parser != None :
body = self.onestage( doc )
parser = getattr( self, 'parse_' + parser )
return parser( body, *args, **kwargs )
stages = self.splitstage(doc)
heads, bodys = zip(*stages)
# get the real body
ends = [ getattr( self, 'ends_'+h['__ends__'] ) for h in heads ]
bodys = [ e(b) for e, b in zip(ends,bodys) ]
# parse indent
bodys = [ [ l[h['__indent__']:] for l in b ]
for h, b in zip( heads, bodys ) ]
# get the names
names = [ h.get('',None) for h in heads ]
# parse the body
parses = [ getattr( self, 'parse_'+h['__type__'] ) for h in heads ]
argses = [ h['__args__'] for h in heads ]
bodys = [ p( b, *a[0], **a[1] )
for p, a, b in zip(parses, argses, bodys) ]
m = MapableTuple( bodys )
m.setkeys(names)
return m
def onestage( self, doc ):
dls = doc.splitlines()
if dls == [] :
return dls
while( dls[0].strip() == '' ):
del dls[0]
while( dls[-1].strip() == '' ):
del dls[-1]
return dls
def splitstage( self, doc ):
'''
split stage
'''
dls = doc.splitlines()
len_dls = len(dls)
striped_dls = [ l.strip() for l in dls ]
e = [ l=='' for l in striped_dls ]
e = [ e[ (i-self.sep) if i>self.sep else 0 :i] for i in range(len_dls) ]
e = [ all(el) for el in e ]
t = [ l.startswith(self.title) for l in striped_dls ]
s = [ i for e, t, i in zip(e,t,range(len_dls)) if e==True and t==True ]
s = [ dls[h:n] for h, n in zip( s, s[1:]+[None,] )]
s = [ (self.parsetitle(st[0]),st[1:]) for st in s ]
return s
@staticmethod
def getargs( *args, **kwargs ):
return ( args, kwargs )
@staticmethod
def read_stage_args( t ):
for i in range(len(t)):
if not t[i].isalpha() :
break
else :
return t, None, None, None
ty = t[0:i]
t = t[i:].lstrip()
args = None
if t[0] == '(' :
c = 0
for i in range(len(t)):
if t[i] == '(' :
c += 1
if t[i] == ')' :
c -= 1
if c == 0 :
break
else :
return ty, t, None, None
args = t[0:i+1]
t = t[i+1:]
t = t.lstrip()
t = t.split()
t = dict( zip( t[::2],t[1::2] ) )
return ty, args, t.get('ends',None), t.get('as',None)
def parsetitle( self, t ):
leftspace = t.find( self.title )
t = t[ leftspace + len(self.title) : ]
dot = t.rfind('.')
if dot < 0 :
            raise SyntaxError, 'doc must have a type'
ty, args, ends, name = self.read_stage_args( t[dot+1:] )
        args = ([],{}) if args is None else \
               eval( 'self.getargs%s' % (args,) )
name = name or t[:dot].strip()
ends = ends or 'E1'
ty = ty.lower()
return { '': name,
'__type__': ty,
'__args__': args,
'__ends__': ends,
'__indent__': leftspace,
}
def _ends_E( self, lines, n ):
e = [ l.strip()=='' for l in lines ]
e = [ e[i:i+n] for i in range(len(lines)) ]
e = [ i for i, el in enumerate(e) if all(el) ]
if e == []:
return lines
else :
return lines[:e[0]]
def ends_E1( self, lines ):
return self._ends_E( lines, 1 )
def ends_E2( self, lines ):
return self._ends_E( lines, 2 )
def ends_E3( self, lines ):
return self._ends_E( lines, 3 )
def parse_value( self, lines ):
return '\r\n'.join(lines)
def parse_object( self, lines ):
r = [ li for li in lines if not li.lstrip().startswith('#') ]
r = [ li.split(':',1) for li in r ]
r = [ [ k.strip(), v.strip() ] for k, v in r ]
return dict(r)
def parse_object_ex( self, lines ):
if lines == [] :
return {}
spacelen = lambda x : len(x) - len(x.lstrip(' '))
sl = spacelen(lines[0])
p = [ i for i, li in enumerate( lines ) if spacelen(li) == sl ]
seg = zip( p, p[1:]+[len(lines)] )
r = {}
for f, b in seg :
k, v = lines[f].split(':',1)
k = k.strip()
v = v.strip()
if b - f == 1 :
r[k] = v
else :
r[k] = self.parse_object_ex( lines[f+1:b] )
if v != '' :
r[k][''] = v
return r
def parse_table( self, lines ):
if lines[0].startswith('!'):
ns = [ c.strip() for c in lines[0].split('!') ][1:]
cols = [ n for n, i in enumerate(lines[0]) if i == '!' ]
lines = lines[1:]
else :
ns = []
slines = lines[:]
slines.sort(key=lambda x:len(x),reverse=True)
maxlen = len(slines[0])
cols = [False,]*maxlen
for i in range(maxlen):
for li in lines :
try :
if li[i]!=' ' :
break
                except IndexError :
pass
else :
cols[i] = True
cols = zip( cols, [True]+cols )
cols = [ i for i, t in enumerate(cols)
if t[0]==False and t[1]==True ]
cols = [ slice(a, b) for a, b in zip(cols,cols[1:]+[None]) ]
rows = [ i for i, li in enumerate(lines) if not li.startswith('!') ]
lines = [ li if not li.startswith('!') else ' '+li[1:] for li in lines ]
rows = [ slice(a, b) for a, b in zip(rows,rows[1:]+[None]) ]
tbs = [ [ '\r\n'.join([ l[c].rstrip() for l in lines[r] ])
for c in cols ]
for r in rows ]
return Table( ns, tbs )
def parse_json( self ):
pass
DEFAULT_E = EasyDoc()
def parse( doc, *args, **kwargs ):
return DEFAULT_E.parse( doc, *args, **kwargs )
if __name__=='__main__':
from pprint import pprint
d = '''
some infos ( not parse )
##!# Metas_A .object
name : foo
author : d13
version : 1.0
##!# Metas_B .value
array(
#.Subject : string,
#.Result : ascii(1),
)
##!# Table_A .table
!Number !Alpha !GreekAlphabet !phonetic
1 a alpha [a]
2 b beta [v]
3 c gamma
##!# Table_B .table
1 a alpha [a]
2 b beta [v]
3 c gamma
##!# Table_C .table
!Student !Subject !Result
Joth Math A
! History B
! Geography A
Marry Society B
! History A
##!# FOO .value ends E2 as Metas_C
array(
#.Subject1 : string,
#.Subject2 : string,
#.Result1 : ascii(1),
#.Result2 : ascii(1),
)
##!# BAR .value as Metas_D
A
##!# Metas_EX .object_ex
argument: showstyle
showtype: dropdownlist
items: ['table','list']
default: 'table'
action: js.changeshowstyle
showtype: text
'''
e = EasyDoc()
r = e.parse(d)
pprint( r )
#({'author': 'd13',
# 'name': 'foo',
# 'version': '1.0'},
# 'array(\r\n #.Subject : string,\r\n #.Result : ascii(1),\r\n)',
# (('1', 'a', 'alpha', '[a]'), ('2', 'b', 'beta', '[v]'), ('3', 'c', 'gamma', '')),
# (('1', 'a', 'alpha', '[a]'), ('2', 'b', 'beta', '[v]'), ('3', 'c', 'gamma', '')),
# (('Joth\r\n\r\n', 'Math\r\nHistory\r\nGeography', 'A\r\nB\r\nA'), ('Marry\r\n', 'Society\r\nHistory', 'B\r\nA')))
print r['Table_A'][:,'GreekAlphabet']
#['alpha', 'beta', 'gamma']
print r['Table_A'][1]
#['2', 'b', 'beta', '[v]']
print r['Table_A'][1,1]
#b
print r['Metas_C']
print r['Metas_EX']
| hackshel/py-aluminium | src/easydoc.py | Python | bsd-3-clause | 16,108 |
"""
Write code to make the following unit test pass
"""
| ynonp/python-examples-verint-2016-07 | 25_exceptions_lab/03.py | Python | mit | 57 |
import _plotly_utils.basevalidators
class XaxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name="xaxis", parent_name="carpet", **kwargs):
super(XaxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", "x"),
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/carpet/_xaxis.py | Python | mit | 449 |
#########################################################################
#
# detectors.py - This file is part of the Spectral Python (SPy)
# package.
#
# Copyright (C) 2012-2013 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, [email protected]
#
'''
Spectral target detection algorithms
'''
from __future__ import division, print_function, unicode_literals
__all__ = ['MatchedFilter', 'matched_filter', 'RX', 'rx', 'ace']
import numpy as np
from spectral.algorithms.transforms import LinearTransform
class MatchedFilter(LinearTransform):
r'''A callable linear matched filter.
Given target/background means and a common covariance matrix, the matched
filter response is given by:
.. math::
y=\frac{(\mu_t-\mu_b)^T\Sigma^{-1}(x-\mu_b)}{(\mu_t-\mu_b)^T\Sigma^{-1}(\mu_t-\mu_b)}
where :math:`\mu_t` is the target mean, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the covariance.
'''
def __init__(self, background, target):
'''Creates the filter, given background/target means and covariance.
Arguments:
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`).
`target` (ndarray):
Length-K target mean
'''
self.background = background
self.u_b = background.mean
self.u_t = target
self._whitening_transform = None
d_tb = (target - self.u_b)
self.d_tb = d_tb
C_1 = background.inv_cov
self.C_1 = C_1
# Normalization coefficient (inverse of squared Mahalanobis distance
# between u_t and u_b)
self.coef = 1.0 / d_tb.dot(C_1).dot(d_tb)
LinearTransform.__init__(
self, (self.coef * d_tb).dot(C_1), pre=-self.u_b)
def whiten(self, X):
'''Transforms data to the whitened space of the background.
Arguments:
`X` (ndarray):
Size (M,N,K) or (M*N,K) array of length K vectors to transform.
Returns an array of same size as `X` but linearly transformed to the
whitened space of the filter.
'''
import math
if self._whitening_transform is None:
A = math.sqrt(self.coef) * self.background.sqrt_inv_cov
self._whitening_transform = LinearTransform(A, pre=-self.u_b)
return self._whitening_transform(X)
def matched_filter(X, target, background=None, window=None, cov=None):
r'''Computes a linear matched filter target detector score.
Usage:
y = matched_filter(X, target, background)
y = matched_filter(X, target, window=<win> [, cov=<cov>])
Given target/background means and a common covariance matrix, the matched
filter response is given by:
.. math::
y=\frac{(\mu_t-\mu_b)^T\Sigma^{-1}(x-\mu_b)}{(\mu_t-\mu_b)^T\Sigma^{-1}(\mu_t-\mu_b)}
where :math:`\mu_t` is the target mean, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the covariance.
Arguments:
`X` (numpy.ndarray):
For the first calling method shown, `X` can be an image with
shape (R, C, B) or an ndarray of shape (R * C, B). If the
`background` keyword is given, it will be used for the image
background statistics; otherwise, background statistics will be
computed from `X`.
If the `window` keyword is given, `X` must be a 3-dimensional
array and background statistics will be computed for each point
in the image using a local window defined by the keyword.
`target` (ndarray):
Length-K vector specifying the target to be detected.
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats` for an image). This argument is not
required if `window` is given.
`window` (2-tuple of odd integers):
Must have the form (`inner`, `outer`), where the two values
specify the widths (in pixels) of inner and outer windows centered
about the pixel being evaulated. Both values must be odd integers.
The background mean and covariance will be estimated from pixels
in the outer window, excluding pixels within the inner window. For
example, if (`inner`, `outer`) = (5, 21), then the number of
pixels used to estimate background statistics will be
:math:`21^2 - 5^2 = 416`. If this argument is given, `background`
is not required (and will be ignored, if given).
The window is modified near image borders, where full, centered
windows cannot be created. The outer window will be shifted, as
needed, to ensure that the outer window still has height and width
`outer` (in this situation, the pixel being evaluated will not be
at the center of the outer window). The inner window will be
clipped, as needed, near image borders. For example, assume an
image with 145 rows and columns. If the window used is
(5, 21), then for the image pixel at (0, 0) (upper left corner),
            the inner window will cover `image[:3, :3]` and the outer
window will cover `image[:21, :21]`. For the pixel at (50, 1), the
inner window will cover `image[48:53, :4]` and the outer window
will cover `image[40:51, :21]`.
`cov` (ndarray):
An optional covariance to use. If this parameter is given, `cov`
will be used for all matched filter calculations (background
covariance will not be recomputed in each window). Only the
            background mean will be recomputed in each window. If the
`window` argument is specified, providing `cov` will allow the
result to be computed *much* faster.
Returns numpy.ndarray:
        The return value will be the matched filter score for each
pixel given. If `X` has shape (R, C, K), the returned ndarray will
have shape (R, C).
'''
if background is not None and window is not None:
raise ValueError('`background` and `window` are mutually ' \
'exclusive arguments.')
if window is not None:
from .spatial import map_outer_window_stats
def mf_wrapper(bg, x):
return MatchedFilter(bg, target)(x)
return map_outer_window_stats(mf_wrapper, X, window[0], window[1],
dim_out=1, cov=cov)
else:
from spectral.algorithms.algorithms import calc_stats
if background is None:
background = calc_stats(X)
return MatchedFilter(background, target)(X)
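# A minimal usage sketch for `matched_filter` (not part of the library API;
# `image` is assumed to be an (R, C, B) ndarray and `target` a length-B
# spectrum -- both names are hypothetical placeholders):
def _matched_filter_usage_sketch(image, target):
    from spectral.algorithms.algorithms import calc_stats
    background = calc_stats(image)
    # Global background statistics shared by every pixel:
    global_scores = matched_filter(image, target, background)
    # Local (5, 21) windows, reusing the global covariance for speed:
    local_scores = matched_filter(image, target, window=(5, 21),
                                  cov=background.cov)
    return global_scores, local_scores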
class RX():
r'''An implementation of the RX anomaly detector. Given the mean and
covariance of the background, this detector returns the squared Mahalanobis
distance of a spectrum according to
.. math::
y=(x-\mu_b)^T\Sigma^{-1}(x-\mu_b)
where `x` is the unknown pixel spectrum, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the background covariance.
References:
Reed, I.S. and Yu, X., "Adaptive multiple-band CFAR detection of an optical
pattern with unknown spectral distribution," IEEE Trans. Acoust.,
Speech, Signal Processing, vol. 38, pp. 1760-1770, Oct. 1990.
'''
dim_out=1
def __init__(self, background=None):
'''Creates the detector, given optional background/target stats.
Arguments:
`background` (`GaussianStats`, default None):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
'''
if background is not None:
self.set_background(background)
else:
self.background = None
def set_background(self, stats):
'''Sets background statistics to be used when applying the detector.'''
self.background = stats
def __call__(self, X):
'''Applies the RX anomaly detector to X.
Arguments:
`X` (numpy.ndarray):
For an image with shape (R, C, B), `X` can be a vector of
length B (single pixel) or an ndarray of shape (R, C, B) or
(R * C, B).
Returns numpy.ndarray or float:
The return value will be the RX detector score (squared Mahalanobis
distance) for each pixel given. If `X` is a single pixel, a float
will be returned; otherwise, the return value will be an ndarray
of floats with one less dimension than the input.
'''
from spectral.algorithms.algorithms import calc_stats
if not isinstance(X, np.ndarray):
raise TypeError('Expected a numpy.ndarray.')
if self.background is None:
self.set_background(calc_stats(X))
X = (X - self.background.mean)
C_1 = self.background.inv_cov
ndim = X.ndim
shape = X.shape
if ndim == 1:
return X.dot(C_1).dot(X)
if ndim == 3:
X = X.reshape((-1, X.shape[-1]))
A = X.dot(C_1)
r = np.einsum('ij,ij->i', A, X)
return r.reshape(shape[:-1])
# I tried using einsum for the above calculations but, surprisingly,
# it was *much* slower than using dot & sum. Need to figure out if
# that is due to multithreading or some other reason.
# print 'ndim =', ndim
# if ndim == 1:
# return np.einsum('i,ij,j', X, self.background.inv_cov, X)
# if ndim == 3:
# return np.einsum('ijk,km,ijm->ij',
# X, self.background.inv_cov, X).squeeze()
# elif ndim == 2:
# return np.einsum('ik,km,im->i',
# X, self.background.inv_cov, X).squeeze()
# else:
# raise Exception('Unexpected number of dimensions.')
#
def rx(X, background=None, window=None, cov=None):
r'''Computes RX anomaly detector scores.
Usage:
y = rx(X [, background=bg])
y = rx(X, window=(inner, outer) [, cov=C])
The RX anomaly detector produces a detection statistic equal to the
squared Mahalanobis distance of a spectrum from a background distribution
according to
.. math::
y=(x-\mu_b)^T\Sigma^{-1}(x-\mu_b)
where `x` is the pixel spectrum, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the background covariance.
Arguments:
`X` (numpy.ndarray):
For the first calling method shown, `X` can be an image with
shape (R, C, B) or an ndarray of shape (R * C, B). If the
`background` keyword is given, it will be used for the image
background statistics; otherwise, background statistics will be
computed from `X`.
If the `window` keyword is given, `X` must be a 3-dimensional
array and background statistics will be computed for each point
in the image using a local window defined by the keyword.
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
`window` (2-tuple of odd integers):
Must have the form (`inner`, `outer`), where the two values
specify the widths (in pixels) of inner and outer windows centered
            about the pixel being evaluated. Both values must be odd integers.
The background mean and covariance will be estimated from pixels
in the outer window, excluding pixels within the inner window. For
example, if (`inner`, `outer`) = (5, 21), then the number of
pixels used to estimate background statistics will be
:math:`21^2 - 5^2 = 416`.
            The window is modified near image borders, where full, centered
windows cannot be created. The outer window will be shifted, as
needed, to ensure that the outer window still has height and width
`outer` (in this situation, the pixel being evaluated will not be
at the center of the outer window). The inner window will be
clipped, as needed, near image borders. For example, assume an
image with 145 rows and columns. If the window used is
(5, 21), then for the image pixel at (0, 0) (upper left corner),
            the inner window will cover `image[:3, :3]` and the outer
window will cover `image[:21, :21]`. For the pixel at (50, 1), the
inner window will cover `image[48:53, :4]` and the outer window
will cover `image[40:51, :21]`.
`cov` (ndarray):
An optional covariance to use. If this parameter is given, `cov`
will be used for all RX calculations (background covariance
will not be recomputed in each window). Only the background
            mean will be recomputed in each window.
Returns numpy.ndarray:
The return value will be the RX detector score (squared Mahalanobis
distance) for each pixel given. If `X` has shape (R, C, B), the
        returned ndarray will have shape (R, C).
References:
Reed, I.S. and Yu, X., "Adaptive multiple-band CFAR detection of an optical
pattern with unknown spectral distribution," IEEE Trans. Acoust.,
Speech, Signal Processing, vol. 38, pp. 1760-1770, Oct. 1990.
'''
if background is not None and window is not None:
raise ValueError('`background` and `window` keywords are mutually ' \
'exclusive.')
if window is not None:
from .spatial import map_outer_window_stats
rx = RX()
def rx_wrapper(bg, x):
rx.set_background(bg)
return rx(x)
return map_outer_window_stats(rx_wrapper, X, window[0], window[1],
dim_out=1, cov=cov)
else:
return RX(background)(X)
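# A minimal usage sketch for `rx` (not part of the library API; `image` is a
# hypothetical (R, C, B) ndarray):
def _rx_usage_sketch(image):
    # Background statistics estimated once from the full image:
    global_scores = rx(image)
    # Background statistics estimated per pixel from (5, 21) local windows:
    local_scores = rx(image, window=(5, 21))
    return global_scores, local_scores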
class ACE():
r'''Adaptive Coherence/Cosine Estimator (ACE).
'''
def __init__(self, target, background=None, **kwargs):
'''Creates the callable detector for target and background.
Arguments:
`target` (ndarray or sequence of ndarray):
Can be either:
A length-B ndarray. In this case, `target` specifies a single
target spectrum to be detected. The return value will be an
ndarray with shape (R, C).
An ndarray with shape (D, B). In this case, `target` contains
`D` length-B targets that define a subspace for the detector.
The return value will be an ndarray with shape (R, C).
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
Keyword Arguments:
`vectorize` (bool, default True):
Specifies whether the __call__ method should attempt to vectorize
                operations. This typically results in faster computation but will
consume more memory.
'''
for k in kwargs:
            if k not in ('vectorize',):
raise ValueError('Invalid keyword: {0}'.format(k))
self.vectorize = kwargs.get('vectorize', True)
self._target = None
self._background = None
self.set_target(target)
if background is not None:
self.set_background(background)
else:
self._background = None
def set_target(self, target):
'''Specifies target or target subspace used by the detector.
Arguments:
`target` (ndarray or sequence of ndarray):
Can be either:
A length-B ndarray. In this case, `target` specifies a single
target spectrum to be detected. The return value will be an
ndarray with shape (R, C).
An ndarray with shape (D, B). In this case, `target` contains
`D` length-B targets that define a subspace for the detector.
The return value will be an ndarray with shape (R, C).
'''
if target is None:
self._target = None
else:
self._target = np.array(target, ndmin=2)
self._update_constants()
def set_background(self, stats):
'''Sets background statistics to be used when applying the detector.
Arguments:
`stats` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
'''
self._background = stats
self._update_constants()
def _update_constants(self):
'''Computes and caches constants used when applying the detector.'''
if self._background is not None and self._target is not None:
if self._background.mean is not None:
target = (self._target - self._background.mean).T
else:
target = self._target.T
self._S = self._background.sqrt_inv_cov.dot(target)
self._P = self._S.dot(np.linalg.pinv(self._S))
else:
            self._S = None
self._P = None
def __call__(self, X):
'''Compute ACE detector scores for X.
Arguments:
`X` (numpy.ndarray):
For an image with shape (R, C, B), `X` can be a vector of
length B (single pixel) or an ndarray of shape (R, C, B) or
(R * C, B).
Returns numpy.ndarray or float:
The return value will be the RX detector score (squared Mahalanobis
distance) for each pixel given. If `X` is a single pixel, a float
will be returned; otherwise, the return value will be an ndarray
of floats with one less dimension than the input.
'''
from spectral.algorithms.algorithms import calc_stats
if not isinstance(X, np.ndarray):
raise TypeError('Expected a numpy.ndarray.')
shape = X.shape
if X.ndim == 1:
# Compute ACE score for single pixel
if self._background.mean is not None:
X = X - self._background.mean
z = self._background.sqrt_inv_cov.dot(X)
return z.dot(self._P).dot(z) / (z.dot(z))
if self._background is None:
self.set_background(calc_stats(X))
if self.vectorize:
# Compute all scores at once
if self._background.mean is not None:
X = X - self._background.mean
if X.ndim == 3:
X = X.reshape((-1, X.shape[-1]))
z = self._background.sqrt_inv_cov.dot(X.T).T
zP = np.dot(z, self._P)
zPz = np.einsum('ij,ij->i', zP, z)
zz = np.einsum('ij,ij->i', z, z)
return (zPz / zz).reshape(shape[:-1])
else:
# Call recursively for each pixel
return np.apply_along_axis(self, -1, X)
def ace(X, target, background=None, window=None, cov=None, **kwargs):
r'''Returns Adaptive Coherence/Cosine Estimator (ACE) detection scores.
Usage:
y = ace(X, target, background)
y = ace(X, target, window=<win> [, cov=<cov>])
Arguments:
`X` (numpy.ndarray):
For the first calling method shown, `X` can be an ndarray with
shape (R, C, B) or an ndarray of shape (R * C, B). If the
`background` keyword is given, it will be used for the image
background statistics; otherwise, background statistics will be
computed from `X`.
If the `window` keyword is given, `X` must be a 3-dimensional
array and background statistics will be computed for each point
in the image using a local window defined by the keyword.
`target` (ndarray or sequence of ndarray):
If `X` has shape (R, C, B), `target` can be any of the following:
A length-B ndarray. In this case, `target` specifies a single
target spectrum to be detected. The return value will be an
ndarray with shape (R, C).
An ndarray with shape (D, B). In this case, `target` contains
`D` length-B targets that define a subspace for the detector.
The return value will be an ndarray with shape (R, C).
A length-D sequence (e.g., list or tuple) of length-B ndarrays.
In this case, the detector will be applied seperately to each of
the `D` targets. This is equivalent to calling the function
sequentially for each target and stacking the results but is
much faster. The return value will be an ndarray with shape
(R, C, D).
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats` for an image). This argument is not
required if `window` is given.
`window` (2-tuple of odd integers):
Must have the form (`inner`, `outer`), where the two values
specify the widths (in pixels) of inner and outer windows centered
            about the pixel being evaluated. Both values must be odd integers.
The background mean and covariance will be estimated from pixels
in the outer window, excluding pixels within the inner window. For
example, if (`inner`, `outer`) = (5, 21), then the number of
pixels used to estimate background statistics will be
:math:`21^2 - 5^2 = 416`. If this argument is given, `background`
is not required (and will be ignored, if given).
The window is modified near image borders, where full, centered
windows cannot be created. The outer window will be shifted, as
needed, to ensure that the outer window still has height and width
`outer` (in this situation, the pixel being evaluated will not be
at the center of the outer window). The inner window will be
clipped, as needed, near image borders. For example, assume an
image with 145 rows and columns. If the window used is
(5, 21), then for the image pixel at (0, 0) (upper left corner),
            the inner window will cover `image[:3, :3]` and the outer
window will cover `image[:21, :21]`. For the pixel at (50, 1), the
inner window will cover `image[48:53, :4]` and the outer window
will cover `image[40:51, :21]`.
`cov` (ndarray):
An optional covariance to use. If this parameter is given, `cov`
will be used for all matched filter calculations (background
covariance will not be recomputed in each window). Only the
            background mean will be recomputed in each window. If the
`window` argument is specified, providing `cov` will allow the
result to be computed *much* faster.
Keyword Arguments:
`vectorize` (bool, default True):
Specifies whether the function should attempt to vectorize
            operations. This typically results in faster computation but will
consume more memory.
Returns numpy.ndarray:
The return value will be the ACE scores for each input pixel. The shape
of the returned array will be either (R, C) or (R, C, D), depending on
the value of the `target` argument.
References:
Kraut S. & Scharf L.L., "The CFAR Adaptive Subspace Detector is a Scale-
Invariant GLRT," IEEE Trans. Signal Processing., vol. 47 no. 9, pp. 2538-41,
Sep. 1999
'''
import spectral as spy
if background is not None and window is not None:
raise ValueError('`background` and `window` keywords are mutually ' \
'exclusive.')
detector = ACE(target, background, **kwargs)
if window is None:
# Use common background statistics for all pixels
if isinstance(target, np.ndarray):
# Single detector score for target subspace for each pixel
result = detector(X)
else:
# Separate score arrays for each target in target list
if background is None:
detector.set_background(spy.calc_stats(X))
def apply_to_target(t):
detector.set_target(t)
return detector(X)
result = np.array([apply_to_target(t) for t in target])
if result.ndim == 3:
result = result.transpose(1, 2, 0)
else:
# Compute local background statistics for each pixel
from spectral.algorithms.spatial import map_outer_window_stats
if isinstance(target, np.ndarray):
# Single detector score for target subspace for each pixel
def ace_wrapper(bg, x):
detector.set_background(bg)
return detector(x)
result = map_outer_window_stats(ace_wrapper, X, window[0], window[1],
dim_out=1, cov=cov)
else:
# Separate score arrays for each target in target list
def apply_to_target(t, x):
detector.set_target(t)
return detector(x)
def ace_wrapper(bg, x):
detector.set_background(bg)
return [apply_to_target(t, x) for t in target]
result = map_outer_window_stats(ace_wrapper, X, window[0], window[1],
dim_out=len(target), cov=cov)
if result.ndim == 3:
result = result.transpose(1, 2, 0)
# Convert NaN values to zero
result = np.nan_to_num(result)
if isinstance(result, np.ndarray):
return np.clip(result, 0, 1, out=result)
else:
return np.clip(result, 0, 1)
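# A minimal usage sketch for `ace` (not part of the library API; `image` is a
# hypothetical (R, C, B) ndarray and the targets are length-B spectra):
def _ace_usage_sketch(image, target_a, target_b):
    # A single target (or target subspace) yields scores of shape (R, C):
    single = ace(image, target_a)
    # A sequence of D targets yields scores of shape (R, C, D):
    multi = ace(image, [target_a, target_b])
    return single, multi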
| ohspite/spectral | spectral/algorithms/detectors.py | Python | gpl-2.0 | 28,119 |
#
# SNMPv1 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1157.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import univ, namedtype, namedval, tag, constraint
from pyasn1_modules import rfc1155
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('version-1', 0)
)
defaultValue = 0
class Community(univ.OctetString): pass
class RequestID(univ.Integer): pass
class ErrorStatus(univ.Integer):
namedValues = namedval.NamedValues(
('noError', 0),
('tooBig', 1),
('noSuchName', 2),
('badValue', 3),
('readOnly', 4),
('genErr', 5)
)
class ErrorIndex(univ.Integer): pass
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1155.ObjectName()),
namedtype.NamedType('value', rfc1155.ObjectSyntax())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
class _RequestBase(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', RequestID()),
namedtype.NamedType('error-status', ErrorStatus()),
namedtype.NamedType('error-index', ErrorIndex()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class GetResponsePDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class TrapPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
namedtype.NamedType('generic-trap', univ.Integer().clone(namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3), ('authenticationFailure', 4), ('egpNeighborLoss', 5), ('enterpriseSpecific', 6)))),
namedtype.NamedType('specific-trap', univ.Integer()),
namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class Pdus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-response', GetResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('trap', TrapPDU())
)
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('community', Community()),
namedtype.NamedType('data', Pdus())
)
| ychen820/microblog | y/google-cloud-sdk/lib/pyasn1_modules/rfc1157.py | Python | bsd-3-clause | 3,285 |
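As a usage illustration (a sketch only, not part of the module; the community
string and field values are hypothetical), a GetRequest built from these
classes can be BER-encoded and decoded round-trip:

from pyasn1.codec.ber import decoder, encoder

pdu = GetRequestPDU()
pdu.setComponentByName('request-id', RequestID(1))
pdu.setComponentByName('error-status', ErrorStatus('noError'))
pdu.setComponentByName('error-index', ErrorIndex(0))
pdu.setComponentByName('variable-bindings', VarBindList())
pdus = Pdus()
pdus.setComponentByName('get-request', pdu)
msg = Message()
msg.setComponentByName('version', Version('version-1'))
msg.setComponentByName('community', Community('public'))
msg.setComponentByName('data', pdus)
substrate = encoder.encode(msg)
decoded, remainder = decoder.decode(substrate, asn1Spec=Message())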
"""
Authors: Damien Irving ([email protected])
Copyright 2015 CSIRO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module wraps a shell script that performs zonal aggregation:
cwsl-ctools/aggregation/cdo_zonal_agg.sh
Part of the CWSLab Model Analysis Service VisTrails plugin.
"""
from vistrails.core.modules import vistrails_module, basic_modules
from cwsl.configuration import configuration
from cwsl.core.constraint import Constraint
from cwsl.core.process_unit import ProcessUnit
from cwsl.core.pattern_generator import PatternGenerator
class ZonalAggregation(vistrails_module.Module):
"""Aggregation along the zonal (longitudinal) axis.
Wraps the cwsl-ctools/aggregation/cdo_zonal_agg.sh script.
Inputs:
in_dataset: Can consist of netCDF files and/or cdml catalogue files
method: Aggregation method. Choices are zonmin, zonmax, zonsum,
zonmean, zonavg, zonvar, zonstd, zonpctl,N
(where N is the percentile)
Outputs:
out_dataset: Consists of netCDF files (i.e. cdml catalogue files
are converted).
"""
_input_ports = [('in_dataset', 'csiro.au.cwsl:VtDataSet',
{'labels': str(['Input dataset'])}),
('method', basic_modules.String,
{'labels': str(['Aggregation method'])}),
]
_output_ports = [('out_dataset', 'csiro.au.cwsl:VtDataSet')]
_execution_options = {'required_modules': ['cdo', 'python/2.7.5', 'python-cdat-lite/6.0rc2-py2.7.5']}
command = '${CWSL_CTOOLS}/aggregation/cdo_zonal_agg.sh'
def __init__(self):
super(ZonalAggregation, self).__init__()
self.out_pattern = PatternGenerator('user', 'default').pattern
def compute(self):
in_dataset = self.getInputFromPort('in_dataset')
method = self.getInputFromPort('method')
self.positional_args = [(method, 0, 'raw'), ]
self.keyword_args = {}
if len(method.split(',')) > 1:
agg_constraint = "".join(method.split(','))
else:
agg_constraint = method
new_constraints_for_output = set([Constraint('lonagg_info', [agg_constraint]),
Constraint('suffix', ['nc']),
])
this_process = ProcessUnit([in_dataset],
self.out_pattern,
self.command,
new_constraints_for_output,
execution_options=self._execution_options,
positional_args=self.positional_args,
cons_keywords=self.keyword_args)
try:
this_process.execute(simulate=configuration.simulate_execution)
except Exception as e:
raise vistrails_module.ModuleError(self, repr(e))
process_output = this_process.file_creator
self.setResult('out_dataset', process_output)
| CWSL/cwsl-mas | cwsl/vt_modules/vt_zonal_agg.py | Python | apache-2.0 | 3,526 |
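To make the constraint derivation in compute() above concrete, a small sketch
(the method value is hypothetical):

# A percentile method such as 'zonpctl,90' is folded into a single token
# for the output-file constraint; plain methods pass through unchanged.
method = 'zonpctl,90'
if len(method.split(',')) > 1:
    agg_constraint = ''.join(method.split(','))   # -> 'zonpctl90'
else:
    agg_constraint = method                       # e.g. 'zonmean'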
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import warnings
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.utils import normalize_path
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
from pip.wheel import WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
logger = logging.getLogger(__name__)
class WheelCommand(Command):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel
package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help=("Build wheels into <dir>, where the default is "
"'<cwd>/wheelhouse'."),
)
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.editable.make())
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(cmdoptions.src.make())
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# confirm requirements
try:
import wheel.bdist_wheel
# Hack to make flake8 not complain about an unused import
wheel.bdist_wheel
except ImportError:
raise CommandError(
"'pip wheel' requires the 'wheel' package. To fix this, run: "
"pip install wheel"
)
try:
import pkg_resources
except ImportError:
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info support."
" To fix this, run: pip install --upgrade setuptools"
)
else:
if not hasattr(pkg_resources, 'DistInfoDistribution'):
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info "
"support. To fix this, run: pip install --upgrade "
"setuptools"
)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
with self._build_session(options) as session:
finder = PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
build_delete = (not (options.no_clean or options.build_dir))
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=None,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
isolated=options.isolated_mode,
session=session,
wheel_download_dir=options.wheel_dir
)
# make the wheelhouse
if not os.path.exists(options.wheel_dir):
os.makedirs(options.wheel_dir)
# parse args and/or requirements files
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
name, None, isolated=options.isolated_mode,
)
)
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(
name,
default_vcs=options.default_vcs,
isolated=options.isolated_mode,
)
)
for filename in options.requirements:
for req in parse_requirements(
filename,
finder=finder,
options=options,
session=session):
requirement_set.add_requirement(req)
# fail if no requirements
if not requirement_set.has_requirements:
logger.error(
"You must give at least one requirement to %s "
"(see \"pip help %s\")",
self.name, self.name,
)
return
try:
# build wheels
wb = WheelBuilder(
requirement_set,
finder,
options.wheel_dir,
build_options=options.build_options or [],
global_options=options.global_options or [],
)
if not wb.build():
raise CommandError(
"Failed to build one or more wheels"
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
| d3banjan/polyamide | webdev/lib/python2.7/site-packages/pip/commands/wheel.py | Python | bsd-2-clause | 9,184 |
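For orientation, typical invocations of the command defined above (illustrative
only; 'SomePackage' is a placeholder):

# pip wheel SomePackage                  # build a wheel for one requirement
# pip wheel -r requirements.txt          # build wheels for a requirements file
# pip wheel -w ./wheelhouse SomePackage  # build into a custom wheel directory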
""" ViewTemperature class """
# pylint: disable=no-member
from chartingperformance import db_session
from chartingperformance.views.view import View
from chartingperformance.models import TemperatureHourly
from chartingperformance.models import HDDHourly
from flask import jsonify
from sqlalchemy import func
from sqlalchemy.sql import label, and_
class Temperature(View):
""" Temperature view query and response methods. """
def __init__(self, args, house_id):
super(Temperature, self).__init__(args)
if self.success:
self.get_totals(house_id)
self.get_items()
def get_totals(self, house_id):
""" Get and store totals from database. """
self.base_query = db_session.\
query(label('date', func.min(TemperatureHourly.date)),
label('min_temperature',
func.min(TemperatureHourly.temperature)),
label('max_temperature',
func.max(TemperatureHourly.temperature)),
label('avg_temperature',
func.avg(TemperatureHourly.temperature)),
label('min_humidity',
func.min(TemperatureHourly.humidity)),
label('max_humidity',
func.max(TemperatureHourly.humidity)),
label('sum_hdd',
func.sum(HDDHourly.hdd))).\
outerjoin(HDDHourly, and_(HDDHourly.date == TemperatureHourly.date,
HDDHourly.house_id == TemperatureHourly.house_id)).\
filter(and_(TemperatureHourly.house_id == house_id,
TemperatureHourly.device_id == self.args['location']))
self.filter_query_by_date_range(TemperatureHourly)
totals = self.base_query.one()
self.json_totals = {'min_temperature': str(totals.min_temperature),
'max_temperature': str(totals.max_temperature),
'avg_temperature': str(totals.avg_temperature),
'sum_hdd': str(totals.sum_hdd),
'min_humidity': str(totals.min_humidity),
'max_humidity': str(totals.max_humidity)}
def get_items(self):
""" Get and store rows from database. """
items = self.group_query_by_interval(TemperatureHourly)
self.json_items = []
for item in items:
data = {'date': self.format_date(item.date),
'min_temperature': str(item.min_temperature),
'max_temperature': str(item.max_temperature),
'avg_temperature': str(item.avg_temperature),
'sum_hdd': str(item.sum_hdd),
'min_humidity': str(item.min_humidity),
'max_humidity': str(item.max_humidity)}
self.json_items.append(data)
def get_response(self):
""" Return response in json format. """
if not self.success:
return jsonify(self.error)
return jsonify(view='temperature',
interval=self.args['interval'],
location=self.args['location'],
totals=self.json_totals,
items=self.json_items)
| netplusdesign/home-performance-flask-api | chartingperformance/views/temperature.py | Python | mit | 3,516 |
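For reference, a sketch of the JSON payload that get_response() above returns
(all field values are hypothetical):

example_response = {
    'view': 'temperature',
    'interval': 'month',
    'location': '0',
    'totals': {'min_temperature': '-10.0', 'max_temperature': '30.5',
               'avg_temperature': '12.3', 'sum_hdd': '4500.0',
               'min_humidity': '20.0', 'max_humidity': '95.0'},
    'items': [{'date': '2015-01-01', 'min_temperature': '-10.0',
               'max_temperature': '2.0', 'avg_temperature': '-3.1',
               'sum_hdd': '700.0', 'min_humidity': '40.0',
               'max_humidity': '90.0'}],
}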
from __future__ import division
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.datasets import star98
from statsmodels.emplike.descriptive import DescStat
from .results.el_results import DescStatRes
class GenRes(object):
"""
    Reads in the data and creates the class instances to be tested.
"""
def __init__(self):
data = star98.load()
desc_stat_data = data.exog[:50, 5]
mv_desc_stat_data = data.exog[:50, 5:7] # mv = multivariate
self.res1 = DescStat(desc_stat_data)
self.res2 = DescStatRes()
self.mvres1 = DescStat(mv_desc_stat_data)
class TestDescriptiveStatistics(GenRes):
def __init__(self):
super(TestDescriptiveStatistics, self).__init__()
def test_test_mean(self):
assert_almost_equal(self.res1.test_mean(14),
self.res2.test_mean_14, 4)
def test_test_mean_weights(self):
assert_almost_equal(self.res1.test_mean(14, return_weights=1)[2],
self.res2.test_mean_weights, 4)
def test_ci_mean(self):
assert_almost_equal(self.res1.ci_mean(), self.res2.ci_mean, 4)
def test_test_var(self):
assert_almost_equal(self.res1.test_var(3),
self.res2.test_var_3, 4)
def test_test_var_weights(self):
assert_almost_equal(self.res1.test_var(3, return_weights=1)[2],
self.res2.test_var_weights, 4)
def test_ci_var(self):
assert_almost_equal(self.res1.ci_var(), self.res2.ci_var, 4)
def test_mv_test_mean(self):
assert_almost_equal(self.mvres1.mv_test_mean(np.array([14, 56])),
self.res2.mv_test_mean, 4)
def test_mv_test_mean_weights(self):
assert_almost_equal(self.mvres1.mv_test_mean(np.array([14, 56]),
return_weights=1)[2],
self.res2.mv_test_mean_wts, 4)
def test_test_skew(self):
assert_almost_equal(self.res1.test_skew(0),
self.res2.test_skew, 4)
def test_ci_skew(self):
"""
        This will be tested in a roundabout way since MATLAB fails when
        computing a CI with multiple nuisance parameters. The process is:
        (1) Get the CI for skewness from ci_skew()
        (2) In MATLAB, test the hypothesis that skew equals the result of
            test_skew
        (3) If the p-values are approximately .05, the test is confirmed
"""
skew_ci = self.res1.ci_skew()
lower_lim = skew_ci[0]
upper_lim = skew_ci[1]
ul_pval = self.res1.test_skew(lower_lim)[1]
ll_pval = self.res1.test_skew(upper_lim)[1]
assert_almost_equal(ul_pval, .050000, 4)
assert_almost_equal(ll_pval, .050000, 4)
def test_ci_skew_weights(self):
assert_almost_equal(self.res1.test_skew(0, return_weights=1)[2],
self.res2.test_skew_wts, 4)
def test_test_kurt(self):
assert_almost_equal(self.res1.test_kurt(0),
self.res2.test_kurt_0, 4)
def test_ci_kurt(self):
"""
        Same strategy as for the skewness CI.
"""
kurt_ci = self.res1.ci_kurt(upper_bound=.5, lower_bound=-1.5)
lower_lim = kurt_ci[0]
upper_lim = kurt_ci[1]
ul_pval = self.res1.test_kurt(upper_lim)[1]
ll_pval = self.res1.test_kurt(lower_lim)[1]
assert_almost_equal(ul_pval, .050000, 4)
assert_almost_equal(ll_pval, .050000, 4)
def test_joint_skew_kurt(self):
assert_almost_equal(self.res1.test_joint_skew_kurt(0, 0),
self.res2.test_joint_skew_kurt, 4)
def test_test_corr(self):
assert_almost_equal(self.mvres1.test_corr(.5),
self.res2.test_corr, 4)
def test_ci_corr(self):
corr_ci = self.mvres1.ci_corr()
lower_lim = corr_ci[0]
upper_lim = corr_ci[1]
ul_pval = self.mvres1.test_corr(upper_lim)[1]
ll_pval = self.mvres1.test_corr(lower_lim)[1]
assert_almost_equal(ul_pval, .050000, 4)
assert_almost_equal(ll_pval, .050000, 4)
def test_test_corr_weights(self):
assert_almost_equal(self.mvres1.test_corr(.5, return_weights=1)[2],
self.res2.test_corr_weights, 4)
| waynenilsen/statsmodels | statsmodels/emplike/tests/test_descriptive.py | Python | bsd-3-clause | 4,334 |
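A minimal sketch of the API under test (assumes statsmodels with the emplike
module is installed; the data are synthetic):

import numpy as np
from statsmodels.emplike.descriptive import DescStat

x = np.random.standard_normal(50)
ds = DescStat(x)                 # univariate empirical-likelihood statistics
llr, pval = ds.test_mean(0)      # EL ratio test that the mean equals 0
low, high = ds.ci_mean()         # EL confidence interval for the mean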