repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
xionzz/earthquake | venv/lib/python2.7/site-packages/numpy/core/setup.py | 8 | 42574 | from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
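# A minimal illustrative sketch (not part of the build; the array below is an
# assumption) of what relaxed strides checking means for a user: when a
# dimension has length 1, its stride no longer matters when the contiguity
# flags are computed, e.g.
#
#     import numpy as np
#     a = np.ones((1, 10))
#     # with NPY_RELAXED_STRIDES_CHECKING=1, strides[0] is ignored because
#     # shape[0] == 1, so `a` can be flagged both C- and F-contiguous
#     print(a.flags['C_CONTIGUOUS'], a.flags['F_CONTIGUOUS'])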
# XXX: ugly, we use a class to avoid calling some expensive functions twice in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
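# Illustrative usage sketch (mirrors how configuration() below uses it): a
# single CallOnceOnly instance is shared by the config.h and _numpyconfig.h
# generators so that the expensive checks run only once per build:
#
#     cocache = CallOnceOnly()
#     moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)  # runs the check
#     ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)  # returns a cached copy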
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Python 2.3 causes a segfault when
# trying to re-acquire the thread-state
# which is done in error-handling
# ufunc code. NPY_ALLOW_C_API and friends
# cause the segfault. So, we disable threading
# for now.
if sys.version[:5] < '2.4.2':
nosmp = 1
else:
# Perhaps a fancier check is in order here,
# so that threads are only enabled if there
# are actually multiple CPUs? -- but
# threaded code can be nice even on a single
# CPU so that long-calculating code doesn't
# block.
try:
nosmp = os.environ['NPY_NOSMP']
nosmp = 1
except KeyError:
nosmp = 0
return nosmp == 1
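# Example (illustrative): SMP support can be disabled by exporting the variable
# before building; note that only the *presence* of NPY_NOSMP is checked, not
# its value:
#
#     NPY_NOSMP=1 python setup.py build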
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct.
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_func(fn, decl='int %s %s(void *);' % (dec, fn),
call=False):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_func(fn, decl='int %s a;' % (fn), call=False):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except:
# os.uname not available on all platforms. blanket except ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct.
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macros (C99), but some platforms only have
# the function, or both the function and the macro. Check for the macro only,
# and define replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers = ["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [8, 12, 16]
expected['Py_intptr_t'] = [4, 8]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [4, 8]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "\
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers = ["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"\
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
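# Example (illustrative): the probe above can be overridden through the MATHLIB
# environment variable; a comma-separated list of libraries is tried before the
# defaults:
#
#     MATHLIB=m python setup.py build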
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info, default_lib_dirs
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform=='win32' or os.name=='nt':
win32_checks(moredefs)
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
# Ugly: this can be called within a library and not an extension,
# in which case there is no libraries attribute (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
# allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
# Don't install fenv unless we need them.
if sys.platform == 'cygwin':
config.add_data_dir('include/numpy/fenv')
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
# (don't ask). Because clibs are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources = [join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during the npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src')]
# numpy.distutils generates .c from .c.src in weird directories; we have
# to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
# add library sources as distutils does not consider library
# dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c')]
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
config.add_extension('multiarray',
sources = multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends = deps + multiarray_deps,
libraries = ['npymath', 'npysort'])
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
# numpy.distutils generates .c from .c.src in weird directories; we have
# to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends = deps + umath_deps,
libraries = ['npymath'],
)
#######################################################################
# scalarmath module #
#######################################################################
config.add_extension('scalarmath',
sources = [join('src', 'scalarmathmodule.c.src'),
join('src', 'private', 'scalarmathmodule.h.src'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
generate_ufunc_api],
depends = deps + npymath_sources,
libraries = ['npymath'],
)
#######################################################################
# _dotblas module #
#######################################################################
# Configure blasdot
blas_info = get_info('blas_opt', 0)
#blas_info = {}
def get_dotblas_sources(ext, build_dir):
if blas_info:
if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):
return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
return ext.depends[:2]
return None # no extension module will be built
config.add_extension('_dotblas',
sources = [get_dotblas_sources],
depends = [join('blasdot', '_dotblas.c'),
join('blasdot', 'apple_sgemv_patch.c'),
join('blasdot', 'cblas.h'),
],
include_dirs = ['blasdot'],
extra_info = blas_info
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources = [join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources = [join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__=='__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| mit | 7,332,196,290,226,749,000 | 40.986193 | 97 | 0.522126 | false |
MalloyDelacroix/DownloaderForReddit | Tests/unittests/utils/importers/test_text_importer.py | 1 | 1164 | from unittest import TestCase
from DownloaderForReddit.utils.importers import text_importer
class TestTextImporter(TestCase):
def test_remove_forbidden_chars(self):
text = ' this \n is a\nname-for-import '
clean = text_importer.remove_forbidden_chars(text)
self.assertEqual('thisisaname-for-import', clean)
def test_split_names(self):
names = 'name_one, name_two, name_three, name_four'
names = text_importer.split_names(names)
self.assertEqual(['name_one', 'name_two', 'name_three', 'name_four'], names)
def test_split_names_with_extra_commas(self):
names = ', name_one, name_two, name_three, name_four, '
names = text_importer.split_names(names)
self.assertEqual(['name_one', 'name_two', 'name_three', 'name_four'], names)
def test_filter_import_list(self):
names = ['one', 'two', 'one', 'three', 'One', 'ONE', 'oNe', 'four', 'one', '', 'five', 'one', 'ONE', 'six']
filtered_names = text_importer.filter_import_list(names)
correct_names = ['one', 'two', 'three', 'four', 'five', 'six']
self.assertEqual(correct_names, filtered_names)
| gpl-3.0 | -31,173,282,697,370,310 | 42.111111 | 115 | 0.634021 | false |
primiano/blink-gitcs | Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py | 59 | 13691 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
class TestConfiguration(object):
def __init__(self, version, architecture, build_type):
self.version = version
self.architecture = architecture
self.build_type = build_type
@classmethod
def category_order(cls):
"""The most common human-readable order in which the configuration properties are listed."""
return ['version', 'architecture', 'build_type']
def items(self):
return self.__dict__.items()
def keys(self):
return self.__dict__.keys()
def __str__(self):
return ("<%(version)s, %(architecture)s, %(build_type)s>" %
self.__dict__)
def __repr__(self):
return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s')" % self.__dict__
def __hash__(self):
return hash(self.version + self.architecture + self.build_type)
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def values(self):
"""Returns the configuration values of this instance as a tuple."""
return self.__dict__.values()
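# Minimal usage sketch (the specifier values are hypothetical): configurations
# compare and hash by their three fields, so they can be collected into sets:
#
#     config = TestConfiguration('win7', 'x86', 'release')
#     str(config)                                             # '<win7, x86, release>'
#     config == TestConfiguration('win7', 'x86', 'release')   # True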
class SpecifierSorter(object):
def __init__(self, all_test_configurations=None, macros=None):
self._specifier_to_category = {}
if not all_test_configurations:
return
for test_configuration in all_test_configurations:
for category, specifier in test_configuration.items():
self.add_specifier(category, specifier)
self.add_macros(macros)
def add_specifier(self, category, specifier):
self._specifier_to_category[specifier] = category
def add_macros(self, macros):
if not macros:
return
# Assume well-formed macros.
for macro, specifier_list in macros.items():
self.add_specifier(self.category_for_specifier(specifier_list[0]), macro)
@classmethod
def category_priority(cls, category):
return TestConfiguration.category_order().index(category)
def specifier_priority(self, specifier):
return self.category_priority(self._specifier_to_category[specifier])
def category_for_specifier(self, specifier):
return self._specifier_to_category.get(specifier)
def sort_specifiers(self, specifiers):
category_slots = map(lambda x: [], TestConfiguration.category_order())
for specifier in specifiers:
category_slots[self.specifier_priority(specifier)].append(specifier)
def sort_and_return(result, specifier_list):
specifier_list.sort()
return result + specifier_list
return reduce(sort_and_return, category_slots, [])
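# Illustrative sketch (assumed specifier names): the sorter groups specifiers
# into the category order version < architecture < build_type and sorts
# alphabetically within each category, so a mixed list comes back ordered:
#
#     sorter = SpecifierSorter(all_test_configurations)
#     sorter.sort_specifiers(['release', 'x86', 'win7'])  # -> ['win7', 'x86', 'release']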
class TestConfigurationConverter(object):
def __init__(self, all_test_configurations, configuration_macros=None):
self._all_test_configurations = all_test_configurations
self._configuration_macros = configuration_macros or {}
self._specifier_to_configuration_set = {}
self._specifier_sorter = SpecifierSorter()
self._collapsing_sets_by_size = {}
self._junk_specifier_combinations = {}
self._collapsing_sets_by_category = {}
matching_sets_by_category = {}
for configuration in all_test_configurations:
for category, specifier in configuration.items():
self._specifier_to_configuration_set.setdefault(specifier, set()).add(configuration)
self._specifier_sorter.add_specifier(category, specifier)
self._collapsing_sets_by_category.setdefault(category, set()).add(specifier)
# FIXME: This seems extra-awful.
for cat2, spec2 in configuration.items():
if category == cat2:
continue
matching_sets_by_category.setdefault(specifier, {}).setdefault(cat2, set()).add(spec2)
for collapsing_set in self._collapsing_sets_by_category.values():
self._collapsing_sets_by_size.setdefault(len(collapsing_set), set()).add(frozenset(collapsing_set))
for specifier, sets_by_category in matching_sets_by_category.items():
for category, set_by_category in sets_by_category.items():
if len(set_by_category) == 1 and self._specifier_sorter.category_priority(category) > self._specifier_sorter.specifier_priority(specifier):
self._junk_specifier_combinations[specifier] = set_by_category
self._specifier_sorter.add_macros(configuration_macros)
def specifier_sorter(self):
return self._specifier_sorter
def _expand_macros(self, specifier):
expanded_specifiers = self._configuration_macros.get(specifier)
return expanded_specifiers or [specifier]
def to_config_set(self, specifier_set, error_list=None):
"""Convert a list of specifiers into a set of TestConfiguration instances."""
if len(specifier_set) == 0:
return copy.copy(self._all_test_configurations)
matching_sets = {}
for specifier in specifier_set:
for expanded_specifier in self._expand_macros(specifier):
configurations = self._specifier_to_configuration_set.get(expanded_specifier)
if not configurations:
if error_list is not None:
error_list.append("Unrecognized specifier '" + expanded_specifier + "'")
return set()
category = self._specifier_sorter.category_for_specifier(expanded_specifier)
matching_sets.setdefault(category, set()).update(configurations)
return reduce(set.intersection, matching_sets.values())
@classmethod
def collapse_macros(cls, macros_dict, specifiers_list):
for macro_specifier, macro in macros_dict.items():
if len(macro) == 1:
continue
for combination in cls.combinations(specifiers_list, len(macro)):
if cls.symmetric_difference(combination) == set(macro):
for item in combination:
specifiers_list.remove(item)
new_specifier_set = cls.intersect_combination(combination)
new_specifier_set.add(macro_specifier)
specifiers_list.append(frozenset(new_specifier_set))
def collapse_individual_specifier_set(macro_specifier, macro):
specifiers_to_remove = []
specifiers_to_add = []
for specifier_set in specifiers_list:
macro_set = set(macro)
if macro_set.intersection(specifier_set) == macro_set:
specifiers_to_remove.append(specifier_set)
specifiers_to_add.append(frozenset((set(specifier_set) - macro_set) | set([macro_specifier])))
for specifier in specifiers_to_remove:
specifiers_list.remove(specifier)
for specifier in specifiers_to_add:
specifiers_list.append(specifier)
for macro_specifier, macro in macros_dict.items():
collapse_individual_specifier_set(macro_specifier, macro)
# FIXME: itertools.combinations is buggy in Python 2.6.1 (the version that ships on SL).
# It seems to be okay in 2.6.5 or later; until then, this is the implementation given
# in http://docs.python.org/library/itertools.html (from 2.7).
@staticmethod
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1 # pylint: disable=W0631
for j in range(i + 1, r): # pylint: disable=W0631
indices[j] = indices[j - 1] + 1
yield tuple(pool[i] for i in indices)
@classmethod
def intersect_combination(cls, combination):
return reduce(set.intersection, [set(specifiers) for specifiers in combination])
@classmethod
def symmetric_difference(cls, iterable):
union = set()
intersection = iterable[0]
for item in iterable:
union = union | item
intersection = intersection.intersection(item)
return union - intersection
def to_specifiers_list(self, test_configuration_set):
"""Convert a set of TestConfiguration instances into one or more list of specifiers."""
# Easy out: if the set is all configurations, the specifier is empty.
if len(test_configuration_set) == len(self._all_test_configurations):
return [[]]
# 1) Build a list of specifier sets, discarding specifiers that don't add value.
specifiers_list = []
for config in test_configuration_set:
values = set(config.values())
for specifier, junk_specifier_set in self._junk_specifier_combinations.items():
if specifier in values:
values -= junk_specifier_set
specifiers_list.append(frozenset(values))
def try_collapsing(size, collapsing_sets):
if len(specifiers_list) < size:
return False
for combination in self.combinations(specifiers_list, size):
if self.symmetric_difference(combination) in collapsing_sets:
for item in combination:
specifiers_list.remove(item)
specifiers_list.append(frozenset(self.intersect_combination(combination)))
return True
return False
# 2) Collapse specifier sets with common specifiers:
# (xp, release), (xp, debug) --> (xp, x86)
for size, collapsing_sets in self._collapsing_sets_by_size.items():
while try_collapsing(size, collapsing_sets):
pass
def try_abbreviating(collapsing_sets):
if len(specifiers_list) < 2:
return False
for combination in self.combinations(specifiers_list, 2):
for collapsing_set in collapsing_sets:
diff = self.symmetric_difference(combination)
if diff <= collapsing_set:
common = self.intersect_combination(combination)
for item in combination:
specifiers_list.remove(item)
specifiers_list.append(frozenset(common | diff))
return True
return False
# 3) Abbreviate specifier sets by combining specifiers across categories.
# (xp, release), (win7, release) --> (xp, win7, release)
while try_abbreviating(self._collapsing_sets_by_size.values()):
pass
# 4) Substitute specifier subsets that match macros within each set:
# (xp, win7, release) -> (win, release)
self.collapse_macros(self._configuration_macros, specifiers_list)
macro_keys = set(self._configuration_macros.keys())
# 5) Collapsing macros may have created combinations that can now be abbreviated.
# (xp, release), (linux, x86, release), (linux, x86_64, release) --> (xp, release), (linux, release) --> (xp, linux, release)
while try_abbreviating([self._collapsing_sets_by_category['version'] | macro_keys]):
pass
# 6) Remove cases where we have collapsed but have all macros.
# (android, win, mac, linux, release) --> (release)
specifiers_to_remove = []
for specifier_set in specifiers_list:
if macro_keys <= specifier_set:
specifiers_to_remove.append(specifier_set)
for specifier_set in specifiers_to_remove:
specifiers_list.remove(specifier_set)
specifiers_list.append(frozenset(specifier_set - macro_keys))
return specifiers_list
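# Illustrative round-trip sketch (hypothetical specifiers and configurations):
# to_config_set expands specifiers into concrete configurations and
# to_specifiers_list collapses a configuration set back into a compact form:
#
#     converter = TestConfigurationConverter(all_test_configurations)
#     configs = converter.to_config_set(set(['win7', 'release']))
#     converter.to_specifiers_list(configs)  # roughly [frozenset(['win7', 'release'])]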
| bsd-3-clause | 3,058,140,010,150,910,000 | 43.451299 | 155 | 0.62983 | false |
denisff/python-for-android | python-build/python-libs/gdata/src/gdata/spreadsheet/__init__.py | 147 | 17942 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Spreadsheets.
"""
__author__ = '[email protected] (Laura Beth Lincoln)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
import re
import string
# XML namespaces which are often used in Google Spreadsheets entities.
GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006'
GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets'
'/2006/extended')
GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets'
'/2006/extended}%s')
class ColCount(atom.AtomBase):
"""The Google Spreadsheets colCount element """
_tag = 'colCount'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def ColCountFromString(xml_string):
return atom.CreateClassFromXMLString(ColCount, xml_string)
class RowCount(atom.AtomBase):
"""The Google Spreadsheets rowCount element """
_tag = 'rowCount'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def RowCountFromString(xml_string):
return atom.CreateClassFromXMLString(RowCount, xml_string)
class Cell(atom.AtomBase):
"""The Google Spreadsheets cell element """
_tag = 'cell'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['row'] = 'row'
_attributes['col'] = 'col'
_attributes['inputValue'] = 'inputValue'
_attributes['numericValue'] = 'numericValue'
def __init__(self, text=None, row=None, col=None, inputValue=None,
numericValue=None, extension_elements=None, extension_attributes=None):
self.text = text
self.row = row
self.col = col
self.inputValue = inputValue
self.numericValue = numericValue
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def CellFromString(xml_string):
return atom.CreateClassFromXMLString(Cell, xml_string)
class Custom(atom.AtomBase):
"""The Google Spreadsheets custom element"""
_namespace = GSPREADSHEETS_EXTENDED_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, column=None, text=None, extension_elements=None,
extension_attributes=None):
self.column = column # The name of the column
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def _BecomeChildElement(self, tree):
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = '{%s}%s' % (self.__class__._namespace,
self.column)
self._AddMembersToElementTree(new_child)
def _ToElementTree(self):
new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
self.column))
self._AddMembersToElementTree(new_tree)
return new_tree
def _HarvestElementTree(self, tree):
namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1)
self.column = local_tag
# Fill in the instance members from the contents of the XML tree.
for child in tree:
self._ConvertElementTreeToMember(child)
for attribute, value in tree.attrib.iteritems():
self._ConvertElementAttributeToMember(attribute, value)
self.text = tree.text
def CustomFromString(xml_string):
element_tree = ElementTree.fromstring(xml_string)
return _CustomFromElementTree(element_tree)
def _CustomFromElementTree(element_tree):
namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1)
if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE:
new_custom = Custom()
new_custom._HarvestElementTree(element_tree)
new_custom.column = local_tag
return new_custom
return None
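# Illustrative sketch (hypothetical XML): an element in the extended namespace,
# e.g.
#
#     <gsx:hours xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">5</gsx:hours>
#
# is parsed by CustomFromString into a Custom instance with column == 'hours'
# and text == '5'; an element in any other namespace makes
# _CustomFromElementTree return None.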
class SpreadsheetsSpreadsheet(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a Spreadsheet Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsSpreadsheetFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet,
xml_string)
class SpreadsheetsWorksheet(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a Worksheet Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
RowCount)
_children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
ColCount)
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
row_count=None, col_count=None, text=None, extension_elements=None,
extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.row_count = row_count
self.col_count = col_count
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsWorksheetFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsWorksheet,
xml_string)
class SpreadsheetsCell(gdata.BatchEntry):
"""A Google Spreadsheets flavor of a Cell Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.BatchEntry._children.copy()
_attributes = gdata.BatchEntry._attributes.copy()
_children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell)
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
cell=None, batch_operation=None, batch_id=None, batch_status=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.batch_operation = batch_operation
self.batch_id = batch_id
self.batch_status = batch_status
self.updated = updated
self.cell = cell
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsCellFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsCell,
xml_string)
class SpreadsheetsList(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a List Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
custom=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.custom = custom or {}
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
# We need to overwrite _ConvertElementTreeToMember to add special logic to
# convert custom attributes to members
def _ConvertElementTreeToMember(self, child_tree):
# Find the element's tag in this class's list of child members
if self.__class__._children.has_key(child_tree.tag):
member_name = self.__class__._children[child_tree.tag][0]
member_class = self.__class__._children[child_tree.tag][1]
# If the class member is supposed to contain a list, make sure the
# matching member is set to a list, then append the new member
# instance to the list.
if isinstance(member_class, list):
if getattr(self, member_name) is None:
setattr(self, member_name, [])
getattr(self, member_name).append(atom._CreateClassFromElementTree(
member_class[0], child_tree))
else:
setattr(self, member_name,
atom._CreateClassFromElementTree(member_class, child_tree))
elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0:
# If this is in the custom namespace, add it to the custom dict.
name = child_tree.tag[child_tree.tag.index('}')+1:]
custom = _CustomFromElementTree(child_tree)
if custom:
self.custom[name] = custom
else:
      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
  # We need to overwrite _AddMembersToElementTree to add special logic to
# convert custom members to XML nodes.
def _AddMembersToElementTree(self, tree):
# Convert the members of this class which are XML child nodes.
# This uses the class's _children dictionary to find the members which
# should become XML child nodes.
member_node_names = [values[0] for tag, values in
self.__class__._children.iteritems()]
for member_name in member_node_names:
member = getattr(self, member_name)
if member is None:
pass
elif isinstance(member, list):
for instance in member:
instance._BecomeChildElement(tree)
else:
member._BecomeChildElement(tree)
# Convert the members of this class which are XML attributes.
for xml_attribute, member_name in self.__class__._attributes.iteritems():
member = getattr(self, member_name)
if member is not None:
tree.attrib[xml_attribute] = member
# Convert all special custom item attributes to nodes
for name, custom in self.custom.iteritems():
custom._BecomeChildElement(tree)
# Lastly, call the ExtensionContainers's _AddMembersToElementTree to
# convert any extension attributes.
atom.ExtensionContainer._AddMembersToElementTree(self, tree)
def SpreadsheetsListFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsList,
xml_string)
class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsSpreadsheet])
def SpreadsheetsSpreadsheetsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed,
xml_string)
class SpreadsheetsWorksheetsFeed(gdata.GDataFeed):
  """A feed containing Google Spreadsheets Worksheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsWorksheet])
def SpreadsheetsWorksheetsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed,
xml_string)
class SpreadsheetsCellsFeed(gdata.BatchFeed):
"""A feed containing Google Spreadsheets Cells"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.BatchFeed._children.copy()
_attributes = gdata.BatchFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsCell])
_children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
RowCount)
_children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
ColCount)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None, row_count=None,
col_count=None, interrupted=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text, interrupted=interrupted)
self.row_count = row_count
self.col_count = col_count
def GetBatchLink(self):
for link in self.link:
if link.rel == 'http://schemas.google.com/g/2005#batch':
return link
return None
def SpreadsheetsCellsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed,
xml_string)
class SpreadsheetsListFeed(gdata.GDataFeed):
  """A feed containing Google Spreadsheets list rows"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsList])
def SpreadsheetsListFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsListFeed,
xml_string)
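# Illustrative usage sketch for the *FromString helpers above. It assumes
# feed_xml holds a worksheets feed fetched through the spreadsheets service;
# the function and variable names here are illustrative only.
def _ExampleListWorksheetTitles(feed_xml):
  """Return the titles of all worksheets in a worksheets feed (sketch only)."""
  feed = SpreadsheetsWorksheetsFeedFromString(feed_xml)
  return [entry.title.text for entry in feed.entry]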
| apache-2.0 | -5,686,754,355,077,866,000 | 36.852321 | 78 | 0.653718 | false |
overcome/elasticsearch | dev-tools/upload-s3.py | 255 | 2375 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
try:
import boto.s3
except:
raise RuntimeError("""
S3 upload requires boto to be installed
Use one of:
'pip install -U boto'
'apt-get install python-boto'
'easy_install boto'
""")
import boto.s3
def list_buckets(conn):
return conn.get_all_buckets()
def upload_s3(conn, path, key, file, bucket):
print 'Uploading %s to Amazon S3 bucket %s/%s' % \
(file, bucket, os.path.join(path, key))
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
bucket = conn.create_bucket(bucket)
k = bucket.new_key(os.path.join(path, key))
k.set_contents_from_filename(file, cb=percent_cb, num_cb=100)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Uploads files to Amazon S3')
parser.add_argument('--file', '-f', metavar='path to file',
                      help='the file to upload', required=True)
parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org',
help='The S3 Bucket to upload to')
parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch',
help='The key path to use')
parser.add_argument('--key', '-k', metavar='key', default=None,
help='The key - uses the file name as default key')
args = parser.parse_args()
if args.key:
key = args.key
else:
key = os.path.basename(args.file)
connection = boto.connect_s3()
upload_s3(connection, args.path, key, args.file, args.bucket);
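# Illustrative invocation (paths and key are placeholders, not real artifacts);
# boto picks up AWS credentials from the environment or its config files:
#
#   python upload-s3.py --file /tmp/elasticsearch-0.0.0.zip \
#       --path elasticsearch/elasticsearch --key elasticsearch-0.0.0.zip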
| apache-2.0 | 4,046,383,687,613,325,300 | 34.447761 | 115 | 0.683789 | false |
wilvk/ansible | lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py | 27 | 7796 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: profitbricks_volume_attachments
short_description: Attach or detach a volume.
description:
- Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
datacenter:
description:
- The datacenter in which to operate.
required: true
server:
description:
      - The name of the server to attach the volume to or detach it from.
required: true
volume:
description:
- The volume name or ID.
required: true
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the operation to complete before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- Indicate desired state of the resource
required: false
default: 'present'
choices: ["present", "absent"]
requirements: [ "profitbricks" ]
author: Matt Baldwin ([email protected])
'''
EXAMPLES = '''
# Attach a Volume
- profitbricks_volume_attachments:
datacenter: Tardis One
server: node002
volume: vol01
wait_timeout: 500
state: present
# Detach a Volume
- profitbricks_volume_attachments:
datacenter: Tardis One
server: node002
volume: vol01
wait_timeout: 500
state: absent
'''
import re
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService
except ImportError:
HAS_PB_SDK = False
from ansible.module_utils.basic import AnsibleModule
uuid_match = re.compile(
r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
                'Request failed to complete ' + msg + ' "' + str(
                    promise['requestId']) + '".')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def attach_volume(module, profitbricks):
"""
Attaches a volume.
This will attach a volume to the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the volume was attached, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
volume = module.params.get('volume')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
# Locate UUID for Volume
if not (uuid_match.match(volume)):
volume_list = profitbricks.list_volumes(datacenter)
for v in volume_list['items']:
if volume == v['properties']['name']:
volume = v['id']
break
return profitbricks.attach_volume(datacenter, server, volume)
def detach_volume(module, profitbricks):
"""
Detaches a volume.
This will remove a volume from the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the volume was detached, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
volume = module.params.get('volume')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
# Locate UUID for Volume
if not (uuid_match.match(volume)):
volume_list = profitbricks.list_volumes(datacenter)
for v in volume_list['items']:
if volume == v['properties']['name']:
volume = v['id']
break
return profitbricks.detach_volume(datacenter, server, volume)
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
volume=dict(),
subscription_user=dict(),
subscription_password=dict(no_log=True),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required')
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
if not module.params.get('volume'):
module.fail_json(msg='volume parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
try:
(changed) = detach_volume(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
elif state == 'present':
try:
attach_volume(module, profitbricks)
module.exit_json()
except Exception as e:
module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 2,936,279,198,317,040,000 | 28.530303 | 128 | 0.620703 | false |
bright-sparks/chromium-spacewalk | tools/perf/measurements/record_per_area_unittest.py | 33 | 1163 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import record_per_area
from telemetry.core import wpr_modes
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case
from telemetry.unittest import test
class RecordPerAreaUnitTest(page_test_test_case.PageTestTestCase):
"""Smoke test for record_per_area measurement
Runs record_per_area measurement on a simple page and verifies
that all metrics were added to the results. The test is purely functional,
i.e. it only checks if the metrics are present and non-zero.
"""
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
@test.Disabled('android')
def testRecordPerArea(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
measurement = record_per_area.RecordPerArea()
results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(results.failures))
| bsd-3-clause | -2,837,092,964,527,429,000 | 39.103448 | 79 | 0.766122 | false |
bolkedebruin/airflow | airflow/_vendor/nvd3/cumulativeLineChart.py | 6 | 4048 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart, TemplateMixin
class cumulativeLineChart(TemplateMixin, NVD3Chart):
"""
A cumulative line chart is used when you have one important grouping representing
an ordered set of data and one value to show, summed over time.
Python example::
from nvd3 import cumulativeLineChart
chart = cumulativeLineChart(name='cumulativeLineChart', x_is_date=True)
xdata = [1365026400000000, 1365026500000000, 1365026600000000]
ydata = [6, 5, 1]
y2data = [36, 55, 11]
extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"}}
chart.add_serie(name="Serie 1", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " mins"}}
chart.add_serie(name="Serie 2", y=y2data, x=xdata, extra=extra_serie)
chart.buildhtml()
Javascript generated:
.. raw:: html
<div id="cumulativeLineChart"><svg style="height:450px; width:100%"></svg></div>
<script>
data_cumulativeLineChart=[{"values": [{"y": 6, "x": 1365026400000000},
{"y": 5, "x": 1365026500000000},
{"y": 1, "x": 1365026600000000}],
"key": "Serie 1", "yAxis": "1"},
{"values": [{"y": 36, "x": 1365026400000000},
{"y": 55, "x": 1365026500000000},
{"y": 11, "x": 1365026600000000}], "key": "Serie 2", "yAxis": "1"}];
nv.addGraph(function() {
var chart = nv.models.cumulativeLineChart();
chart.margin({top: 30, right: 60, bottom: 20, left: 60});
var datum = data_cumulativeLineChart;
chart.xAxis
.tickFormat(function(d) { return d3.time.format('%d %b %Y')(new Date(parseInt(d))) });
chart.yAxis
.tickFormat(d3.format(',.1%'));
chart.tooltipContent(function(key, y, e, graph) {
var x = d3.time.format("%d %b %Y")(new Date(parseInt(graph.point.x)));
var y = String(graph.point.y);
if(key == 'Serie 1'){
var y = 'There are ' + String(e) + ' calls';
}if(key == 'Serie 2'){
var y = String(e) + ' mins';
}
tooltip_str = '<center><b>'+key+'</b></center>' + y + ' on ' + x;
return tooltip_str;
});
chart.showLegend(true);
d3.select('#cumulativeLineChart svg')
.datum(datum)
.transition().duration(500)
.attr('height', 450)
.call(chart); });
</script>
"""
CHART_FILENAME = "./cumulativelinechart.html"
template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = 'cumulativeLineChart'
height = kwargs.get('height', 450)
width = kwargs.get('width', None)
if kwargs.get('x_is_date', False):
self.set_date_flag(True)
self.create_x_axis('xAxis',
format=kwargs.get('x_axis_format', '%d %b %Y'),
date=True)
self.set_custom_tooltip_flag(True)
else:
self.create_x_axis('xAxis', format=kwargs.get(
'x_axis_format', '.2f'))
self.create_y_axis('yAxis', format=kwargs.get('y_axis_format', '.1%'))
self.set_graph_height(height)
if width:
self.set_graph_width(width)
| apache-2.0 | -8,209,313,798,998,288,000 | 37.923077 | 114 | 0.513587 | false |
leeamen/eva | 2017/cutwords.py | 1 | 2739 | #!/usr/bin/python
#coding:utf-8
import mybaselib
import logging
import jieba
import jieba.analyse
import numpy as np
import csv
import sys
import stat
import os
import re
reload(sys)
sys.setdefaultencoding('utf-8')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Row(object):
def __init__(self, row):
arr = filter(lambda x:len(x.strip()) > 0, re.split(' ', row))
    if len(arr) != 2:
print 'row:',row
sys.exit()
self.sentence = arr[0]
self.class_label = arr[1]
def GetSentence(self):
return self.sentence
def GetClassLabel(self):
return self.class_label
def walktree(dirname, callback, userdata):
for f in os.listdir(dirname):
pathname = os.path.join(dirname, f)
mode = os.stat(pathname).st_mode
if stat.S_ISDIR(mode):
pass
# It's a directory, recurse into it
#walktree(pathname, callback)
elif stat.S_ISREG(mode):
# It's a file, call the callback function
callback(pathname, userdata)
else:
# Unknown file type, print a message
logger.error('Skipping %s', pathname)
def cutwords(filename, userdata):
if '.txt' != filename[-4:]:
return
logger.debug('start process file:%s', filename)
wf = open(userdata['output'], 'a')
with open(filename, 'rb') as rf:
while True:
line = rf.readline()
if line is None or len(line) == 0:
break;
row = Row(line)
sentence = row.GetSentence()
      # segment the sentence into words
cut_list = jieba.lcut(sentence, cut_all = False)
wf.write(' '.join(cut_list) + '\n')
wf.flush()
wf.close()
if __name__ == '__main__':
  # load the custom user dictionary
jieba.load_userdict('./data/userdict.data')
sentence = '该类会将文本中的词语转换为词频矩阵'
sentence = '春川辣炒鸡排外表是古典的吗?'
print '|'.join(jieba.lcut(sentence, cut_all = False))
  # segment words in all txt files
userdata = {}
userdata['output'] = './data/all.cuts'
os.system('rm -f ./data/all.cuts')
walktree('data', cutwords, userdata)
sys.exit()
# jieba.analyse.extract_tags(sentence, topK=20, withWeight=False, allowPOS=())
  # set your own corpus
# corpus_file = '***.corpus'
# tags = jieba.analyse.extract_tags('该类会将文本中的词语转换为词频矩阵', topK=5)
# print '|'.join(tags)
filename = sys.argv[1]
wf = open(filename + '.cuts', 'wb')
with open(filename, 'rb') as rf:
while True:
line = rf.readline()
if line is None or len(line) == 0:
break;
row = mybaselib.Row(line)
sentence = row.GetSentence()
sentence = sentence.strip()
      # segment the sentence into words
cut_list = jieba.lcut(sentence, cut_all = False)
wf.write(' '.join(cut_list) + ' ')
wf.flush()
wf.close()
| apache-2.0 | 2,398,056,912,510,483,500 | 25.232323 | 79 | 0.628802 | false |
asrar7787/Test-Frontools | node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py | 1789 | 10585 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large numbers
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
  # Put sources_to_index in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
| mit | 7,521,287,484,674,701,000 | 38.203704 | 80 | 0.671138 | false |
abhinavmoudgil95/root | tutorials/tmva/keras/MulticlassKeras.py | 15 | 2346 | #!/usr/bin/env python
from ROOT import TMVA, TFile, TTree, TCut, gROOT
from os.path import isfile
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.regularizers import l2
from keras import initializations
from keras.optimizers import SGD
# Setup TMVA
TMVA.Tools.Instance()
TMVA.PyMethodBase.PyInitialize()
output = TFile.Open('TMVA.root', 'RECREATE')
factory = TMVA.Factory('TMVAClassification', output,
'!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=multiclass')
# Load data
if not isfile('tmva_example_multiple_background.root'):
createDataMacro = str(gROOT.GetTutorialDir()) + '/tmva/createData.C'
print(createDataMacro)
gROOT.ProcessLine('.L {}'.format(createDataMacro))
gROOT.ProcessLine('create_MultipleBackground(4000)')
data = TFile.Open('tmva_example_multiple_background.root')
signal = data.Get('TreeS')
background0 = data.Get('TreeB0')
background1 = data.Get('TreeB1')
background2 = data.Get('TreeB2')
dataloader = TMVA.DataLoader('dataset')
for branch in signal.GetListOfBranches():
dataloader.AddVariable(branch.GetName())
dataloader.AddTree(signal, 'Signal')
dataloader.AddTree(background0, 'Background_0')
dataloader.AddTree(background1, 'Background_1')
dataloader.AddTree(background2, 'Background_2')
dataloader.PrepareTrainingAndTestTree(TCut(''),
'SplitMode=Random:NormMode=NumEvents:!V')
# Generate model
# Define initialization
def normal(shape, name=None):
return initializations.normal(shape, scale=0.05, name=name)
# Define model
model = Sequential()
model.add(Dense(32, init=normal, activation='relu', W_regularizer=l2(1e-5), input_dim=4))
#model.add(Dense(32, init=normal, activation='relu', W_regularizer=l2(1e-5)))
model.add(Dense(4, init=normal, activation='softmax'))
# Set loss and optimizer
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy',])
# Store model to file
model.save('model.h5')
model.summary()
# Book methods
factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher',
'!H:!V:Fisher:VarTransform=D,G')
factory.BookMethod(dataloader, TMVA.Types.kPyKeras, "PyKeras",
'H:!V:VarTransform=D,G:FilenameModel=model.h5:NumEpochs=20:BatchSize=32')
# Run TMVA
factory.TrainAllMethods()
factory.TestAllMethods()
factory.EvaluateAllMethods()
| lgpl-2.1 | 6,349,441,161,328,347,000 | 31.583333 | 93 | 0.757033 | false |
mvaled/OpenUpgrade | addons/survey/controllers/main.py | 75 | 21298 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import json
import logging
import werkzeug
import werkzeug.utils
from datetime import datetime
from math import ceil
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT as DTF, ustr
_logger = logging.getLogger(__name__)
class WebsiteSurvey(http.Controller):
## HELPER METHODS ##
def _check_bad_cases(self, cr, uid, request, survey_obj, survey, user_input_obj, context=None):
# In case of bad survey, redirect to surveys list
if survey_obj.exists(cr, SUPERUSER_ID, survey.id, context=context) == []:
return werkzeug.utils.redirect("/survey/")
# In case of auth required, block public user
if survey.auth_required and uid == request.website.user_id.id:
return request.website.render("survey.auth_required", {'survey': survey})
# In case of non open surveys
if survey.stage_id.closed:
return request.website.render("survey.notopen")
# If there is no pages
if not survey.page_ids:
return request.website.render("survey.nopages")
# Everything seems to be ok
return None
def _check_deadline(self, cr, uid, user_input, context=None):
        '''Prevent opening of the survey if the deadline has passed.
! This will NOT disallow access to users who have already partially filled the survey !'''
if user_input.deadline:
dt_deadline = datetime.strptime(user_input.deadline, DTF)
dt_now = datetime.now()
if dt_now > dt_deadline: # survey is not open anymore
return request.website.render("survey.notopen")
return None
## ROUTES HANDLERS ##
# Survey start
@http.route(['/survey/start/<model("survey.survey"):survey>',
'/survey/start/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def start_survey(self, survey, token=None, **post):
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Test mode
if token and token == "phantom":
_logger.info("[survey] Phantom mode")
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id, 'test_entry': True}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
# END Test mode
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Manual surveying
if not token:
vals = {'survey_id': survey.id}
if request.website.user_id.id != uid:
vals['partner_id'] = request.registry['res.users'].browse(cr, uid, uid, context=context).partner_id.id
user_input_id = user_input_obj.create(cr, uid, vals, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
else:
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)], context=context)[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not open expired survey
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # Intro page
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey.id, user_input.token))
# Survey displaying
@http.route(['/survey/fill/<model("survey.survey"):survey>/<string:token>',
'/survey/fill/<model("survey.survey"):survey>/<string:token>/<string:prev>'],
type='http', auth='public', website=True)
def fill_survey(self, survey, token, prev=None, **post):
        '''Display and validate a survey'''
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Load the user_input
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)])[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not display expired survey (even if some pages have already been
# displayed -- There's a time for everything!)
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # First page
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, 0, go_back=False, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
elif user_input.state == 'done': # Display success message
return request.website.render('survey.sfinished', {'survey': survey,
'token': token,
'user_input': user_input})
elif user_input.state == 'skip':
flag = (True if prev and prev == 'prev' else False)
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=flag, context=context)
#special case if you click "previous" from the last page, then leave the survey, then reopen it from the URL, avoid crash
if not page:
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=True, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
else:
return request.website.render("website.403")
# AJAX prefilling of a survey
@http.route(['/survey/prefill/<model("survey.survey"):survey>/<string:token>',
'/survey/prefill/<model("survey.survey"):survey>/<string:token>/<model("survey.page"):page>'],
type='http', auth='public', website=True)
def prefill(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch previous answers
if page:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token), ('page_id', '=', page.id)], context=context)
else:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Return non empty answers in a JSON compatible format
for answer in previous_answers:
if not answer.skipped:
answer_tag = '%s_%s_%s' % (answer.survey_id.id, answer.page_id.id, answer.question_id.id)
answer_value = None
if answer.answer_type == 'free_text':
answer_value = answer.value_free_text
elif answer.answer_type == 'text' and answer.question_id.type == 'textbox':
answer_value = answer.value_text
elif answer.answer_type == 'text' and answer.question_id.type != 'textbox':
# here come comment answers for matrices, simple choice and multiple choice
answer_tag = "%s_%s" % (answer_tag, 'comment')
answer_value = answer.value_text
elif answer.answer_type == 'number':
answer_value = answer.value_number.__str__()
elif answer.answer_type == 'date':
answer_value = answer.value_date
elif answer.answer_type == 'suggestion' and not answer.value_suggested_row:
answer_value = answer.value_suggested.id
elif answer.answer_type == 'suggestion' and answer.value_suggested_row:
answer_tag = "%s_%s" % (answer_tag, answer.value_suggested_row.id)
answer_value = answer.value_suggested.id
if answer_value:
dict_soft_update(ret, answer_tag, answer_value)
else:
_logger.warning("[survey] No answer has been found for question %s marked as non skipped" % answer_tag)
return json.dumps(ret)
# AJAX scores loading for quiz correction mode
@http.route(['/survey/scores/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def get_scores(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch answers
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Compute score for each question
for answer in previous_answers:
tmp_score = ret.get(answer.question_id.id, 0.0)
ret.update({answer.question_id.id: tmp_score + answer.quizz_mark})
return json.dumps(ret)
# AJAX submission of a page
@http.route(['/survey/submit/<model("survey.survey"):survey>'],
type='http', methods=['POST'], auth='public', website=True)
def submit(self, survey, **post):
_logger.debug('Incoming data: %s', post)
page_id = int(post['page_id'])
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
questions_obj = request.registry['survey.question']
questions_ids = questions_obj.search(cr, uid, [('page_id', '=', page_id)], context=context)
questions = questions_obj.browse(cr, uid, questions_ids, context=context)
# Answer validation
errors = {}
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
errors.update(questions_obj.validate_question(cr, uid, question, post, answer_tag, context=context))
ret = {}
if (len(errors) != 0):
# Return errors messages to webpage
ret['errors'] = errors
else:
# Store answers into database
user_input_obj = request.registry['survey.user_input']
user_input_line_obj = request.registry['survey.user_input_line']
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', post['token'])], context=context)[0]
            except IndexError: # Invalid token
return request.website.render("website.403")
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
user_input_line_obj.save_lines(cr, uid, user_input_id, question, post, answer_tag, context=context)
user_input = user_input_obj.browse(cr, uid, user_input_id, context=context)
go_back = post['button_submit'] == 'previous'
next_page, _, last = survey_obj.next_page(cr, uid, user_input, page_id, go_back=go_back, context=context)
vals = {'last_displayed_page_id': page_id}
if next_page is None and not go_back:
vals.update({'state': 'done'})
else:
vals.update({'state': 'skip'})
user_input_obj.write(cr, uid, user_input_id, vals, context=context)
ret['redirect'] = '/survey/fill/%s/%s' % (survey.id, post['token'])
if go_back:
ret['redirect'] += '/prev'
return json.dumps(ret)
# Printing routes
@http.route(['/survey/print/<model("survey.survey"):survey>',
'/survey/print/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def print_survey(self, survey, token=None, **post):
        '''Display a survey in printable view; if <token> is set, it will
grab the answers of the user_input_id that has <token>.'''
return request.website.render('survey.survey_print',
{'survey': survey,
'token': token,
'page_nr': 0,
'quizz_correction': True if survey.quizz_mode and token else False})
@http.route(['/survey/results/<model("survey.survey"):survey>'],
type='http', auth='user', website=True)
def survey_reporting(self, survey, token=None, **post):
'''Display survey Results & Statistics for given survey.'''
result_template ='survey.result'
current_filters = []
filter_display_data = []
filter_finish = False
survey_obj = request.registry['survey.survey']
if not survey.user_input_ids or not [input_id.id for input_id in survey.user_input_ids if input_id.state != 'new']:
result_template = 'survey.no_result'
if 'finished' in post:
post.pop('finished')
filter_finish = True
if post or filter_finish:
filter_data = self.get_filter_data(post)
current_filters = survey_obj.filter_input_ids(request.cr, request.uid, survey, filter_data, filter_finish, context=request.context)
filter_display_data = survey_obj.get_filter_display_data(request.cr, request.uid, filter_data, context=request.context)
return request.website.render(result_template,
{'survey': survey,
'survey_dict': self.prepare_result_dict(survey, current_filters),
'page_range': self.page_range,
'current_filters': current_filters,
'filter_display_data': filter_display_data,
'filter_finish': filter_finish
})
# Quick retroengineering of what is injected into the template for now:
# (TODO: flatten and simplify this)
#
# survey: a browse record of the survey
# survey_dict: very messy dict containing all the info to display answers
# {'page_ids': [
#
# ...
#
# {'page': browse record of the page,
# 'question_ids': [
#
# ...
#
# {'graph_data': data to be displayed on the graph
# 'input_summary': number of answered, skipped...
# 'prepare_result': {
# answers displayed in the tables
# }
# 'question': browse record of the question_ids
# }
#
# ...
#
# ]
# }
#
# ...
#
# ]
# }
#
# page_range: pager helper function
# current_filters: a list of ids
# filter_display_data: [{'labels': ['a', 'b'], question_text} ... ]
# filter_finish: boolean => only finished surveys or not
#
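    # As a sketch (shape only; the exact keys inside input_summary and
    # prepare_result come from the survey_obj methods called below), a one-page
    # survey with a single question yields roughly:
    #
    #   {'page_ids': [
    #       {'page': <survey.page record>,
    #        'question_ids': [
    #            {'question': <survey.question record>,
    #             'input_summary': {...},
    #             'prepare_result': {...},
    #             'graph_data': '<json string>'}
    #        ]}
    #   ]}
    #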
def prepare_result_dict(self,survey, current_filters=None):
"""Returns dictionary having values for rendering template"""
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = {'page_ids': []}
for page in survey.page_ids:
page_dict = {'page': page, 'question_ids': []}
for question in page.question_ids:
question_dict = {'question':question, 'input_summary':survey_obj.get_input_summary(request.cr, request.uid, question, current_filters, context=request.context), 'prepare_result':survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context), 'graph_data': self.get_graph_data(question, current_filters)}
page_dict['question_ids'].append(question_dict)
result['page_ids'].append(page_dict)
return result
def get_filter_data(self, post):
"""Returns data used for filtering the result"""
filters = []
for ids in post:
#if user add some random data in query URI, ignore it
try:
row_id, answer_id = ids.split(',')
filters.append({'row_id': int(row_id), 'answer_id': int(answer_id)})
except:
return filters
return filters
def page_range(self, total_record, limit):
'''Returns number of pages required for pagination'''
total = ceil(total_record / float(limit))
return range(1, int(total + 1))
def get_graph_data(self, question, current_filters=None):
'''Returns formatted data required by graph library on basis of filter'''
# TODO refactor this terrible method and merge it with prepare_result_dict
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = []
if question.type == 'multiple_choice':
result.append({'key': ustr(question.question),
'values': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
})
if question.type == 'simple_choice':
result = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
if question.type == 'matrix':
data = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
for answer in data['answers']:
values = []
for row in data['rows']:
values.append({'text': data['rows'].get(row), 'count': data['result'].get((row, answer))})
result.append({'key': data['answers'].get(answer), 'values': values})
return json.dumps(result)
def dict_soft_update(dictionary, key, value):
''' Insert the pair <key>: <value> into the <dictionary>. If <key> is
already present, this function will append <value> to the list of
existing data (instead of erasing it) '''
if key in dictionary:
dictionary[key].append(value)
else:
dictionary.update({key: [value]})
| agpl-3.0 | -4,964,978,169,292,348,000 | 48.761682 | 359 | 0.571321 | false |
ksmaheshkumar/weevely3 | utils/code.py | 15 | 1763 | from core.loggers import log
from distutils import spawn
from core import messages
import subprocess
# Minify PHP code removing white spaces and comments.
# Returns None in case of errors.
def minify_php(original_code):
php_binary = spawn.find_executable('php')
if not php_binary:
log.debug(messages.utils_code.minify_php_missing_binary)
return None
try:
output = subprocess.check_output(
[
php_binary, '-r', """function is_label($str) {
return preg_match('~[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*+~',$str);
}
function get_tiny($snippet,
$remove_whitespace=TRUE,
$remove_comments=TRUE) {
//generate tokens from snippet
$tokens = token_get_all($snippet);
//replace all variables, remove whitespace, remove comments
$new_source = '';
foreach ($tokens as $i => $token) {
if(!is_array($token)) {
$new_source .= $token;
continue;
}
if($remove_comments) {
if(in_array($token[0],array(T_COMMENT,T_DOC_COMMENT))) {
continue;
}
}
if ($token[0] == T_WHITESPACE && $remove_whitespace) {
if (isset($tokens[$i-1]) && isset($tokens[$i+1]) && is_array($tokens[$i-1]) && is_array($tokens[$i+1]) && is_label($tokens[$i-1][1]) && is_label($tokens[$i+1][1])) {
$new_source .= ' ';
}
} elseif($token[0]==T_CASE) {
$new_source .= $token[1].' ';
} else {
$new_source .= $token[1];
}
}
return $new_source;
}
$d=<<<'EOD'
%s
EOD;
print(get_tiny($d));
""" % ('<?php %s ?>' % str(original_code)),
])
except Exception as e:
import traceback; log.debug(traceback.format_exc())
log.debug(messages.utils_code.minify_php_error_minifying)
return None
if len(output) < 8:
return None
return output[6:-2]
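# Illustrative call, assuming a php binary is available on PATH (the function
# returns None otherwise); the PHP snippet is arbitrary example input:
#
#   minified = minify_php("$a = 1; /* comment */ $b = $a + 1;")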
| gpl-3.0 | 8,845,234,821,779,462,000 | 23.830986 | 169 | 0.600113 | false |
fxfitz/ansible | lib/ansible/modules/network/avi/avi_cloudconnectoruser.py | 41 | 4186 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
- This module is used to configure CloudConnectorUser object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
azure_serviceprincipal:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
azure_userpass:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
name:
description:
- Name of the object.
required: true
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: '{{ controller }}'
name: root
password: '{{ password }}'
private_key: |
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----'
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: '{{ username }}'
"""
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
azure_serviceprincipal=dict(type='dict',),
azure_userpass=dict(type='dict',),
name=dict(type='str', required=True),
private_key=dict(type='str', no_log=True,),
public_key=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudconnectoruser',
set(['private_key']))
if __name__ == '__main__':
main()
| gpl-3.0 | 2,846,207,535,336,835,600 | 30.712121 | 92 | 0.601768 | false |
2013Commons/hue | desktop/core/ext-py/requests-2.0.0/requests/packages/urllib3/_collections.py | 76 | 2898 | # urllib3/_collections.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import RLock
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return self._container.keys()
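# Minimal usage sketch (illustrative only, not part of urllib3). It exercises
# the eviction and ``dispose_func`` behaviour described in the class docstring
# above; the callback name ``_close`` is just a placeholder.
if __name__ == '__main__':
    def _close(value):
        # Called once for each value evicted from (or cleared out of) the cache.
        print('disposing of %r' % (value,))
    cache = RecentlyUsedContainer(maxsize=2, dispose_func=_close)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3   # evicts 'a', the least recently used key, disposing of 1
    assert 'a' not in cache
    cache.clear()    # disposes of the two remaining values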
| apache-2.0 | 4,353,229,331,775,511,600 | 29.829787 | 92 | 0.624569 | false |
zengluyang/ns3-d2d | src/core/bindings/modulegen_customizations.py | 121 | 7665 | import re
import os
import sys
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass, param, retval
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
class ArgvParam(Parameter):
"""
Converts a python list-of-strings argument to a pair of 'int argc,
char *argv[]' arguments to pass into C.
One Python argument becomes two C function arguments -> it's a miracle!
Note: this parameter type handler is not registered by any name;
must be used explicitly.
"""
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = []
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def convert_python_to_c(self, wrapper):
py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
argc_var = wrapper.declarations.declare_variable('int', 'argc')
name = wrapper.declarations.declare_variable('char**', self.name)
idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
#wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
% (name, py_name))
wrapper.before_call.add_cleanup_code('free(%s);' % name)
wrapper.before_call.write_code('''
for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
{
''' % vars())
wrapper.before_call.sink.indent()
wrapper.before_call.write_code('''
PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
''' % vars())
#wrapper.before_call.write_error_check('item == NULL')
wrapper.before_call.write_error_check(
'\n'
'#if PY_VERSION_HEX >= 0x03000000\n'
'!PyUnicode_Check(item)\n'
'#else\n'
'!PyString_Check(item)\n'
'#endif\n',
failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
'"argument %s must be a list of strings");') % self.name)
wrapper.before_call.write_code(
'#if PY_VERSION_HEX >= 0x03000000\n'
'{var}[{idx}] = PyUnicode_AsUTF8(item);\n'
'#else\n'
'{var}[{idx}] = PyString_AsString(item);\n'
'#endif\n'
.format(var=name, idx=idx))
wrapper.before_call.sink.unindent()
wrapper.before_call.write_code('}')
wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
wrapper.call_params.append(argc_var)
wrapper.call_params.append(name)
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleWithContext(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleWithContext", "_wrap_Simulator_ScheduleWithContext",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
import pybindgen.typehandlers.base
for alias in "std::_Ios_Openmode", "std::ios::openmode":
pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
def post_register_types(root_module):
Simulator_customizations(root_module)
CommandLine_customizations(root_module)
TypeId_customizations(root_module)
add_std_ofstream(root_module)
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'Threading' not in enabled_features:
for clsname in ['SystemThread', 'SystemMutex', 'SystemCondition', 'CriticalSection',
'SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
if 'RealTime' not in enabled_features:
for clsname in ['WallClockSynchronizer', 'RealtimeSimulatorImpl']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
root_module.enums.remove(root_module['ns3::RealtimeSimulatorImpl::SynchronizationMode'])
root_module.after_init.write_code("PyEval_InitThreads();")
# these are already in the main script, so commented out here
# Object_customizations(root_module)
# Attribute_customizations(root_module)
#def post_register_functions(root_module):
# pass
| gpl-2.0 | -254,736,945,795,681,200 | 41.115385 | 118 | 0.622048 | false |
evansd/django | django/conf/locale/sk/formats.py | 65 | 1106 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
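# Illustrative note (not part of Django): the *_INPUT_FORMATS entries above are
# ordinary strptime patterns, so a value accepted by this locale parses with the
# standard library directly, e.g.:
# >>> from datetime import datetime
# >>> datetime.strptime('25.10.2006', '%d.%m.%Y')
# datetime.datetime(2006, 10, 25, 0, 0)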
| bsd-3-clause | 3,139,702,909,327,997,400 | 37.137931 | 77 | 0.587703 | false |
watonyweng/horizon | openstack_dashboard/api/rest/network.py | 50 | 1431 |
# Copyright 2015, Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the network abstraction APIs.
"""
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
@urls.register
class SecurityGroups(generic.View):
"""API for Network Abstraction
Handles differences between Nova and Neutron.
"""
url_regex = r'network/securitygroups/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of security groups.
The listing result is an object with property "items". Each item is
        a security group.
Example GET:
http://localhost/api/network/securitygroups
"""
security_groups = api.network.security_group_list(request)
return {'items': [sg.to_dict() for sg in security_groups]}
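# Illustrative response shape (a sketch only; the exact fields come from the
# security group objects returned by the underlying Nova/Neutron API):
# GET /api/network/securitygroups ->
#     {"items": [{"id": "...", "name": "default", "description": "...",
#                 "security_group_rules": [...]}]}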
| apache-2.0 | 4,176,712,427,878,167,000 | 30.108696 | 75 | 0.71768 | false |
fener06/pyload | module/lib/Getch.py | 43 | 2048 | class Getch:
"""
Gets a single character from standard input. Does not echo to
the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except(AttributeError, ImportError):
self.impl = _GetchUnix()
def __call__(self): return self.impl()
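# Example usage (illustrative): blocks until a single key is pressed and
# returns it without echoing.
#   getch = Getch()
#   key = getch()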
class _GetchUnix:
def __init__(self):
        # Import here so construction fails fast with ImportError on platforms
        # that lack a POSIX tty (e.g. Windows).
        import tty
        import sys
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt #see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg) | gpl-3.0 | -4,606,621,956,655,255,000 | 25.960526 | 78 | 0.555664 | false |
VMatrixTeam/open-matrix | src/webservice/model/snippet/snippet.py | 1 | 2067 | import model.base
import tornado.gen
import json
from MySQLdb import escape_string
class Snippet(object):
@staticmethod
@tornado.gen.coroutine
def get_snippets(id_from, count):
result = yield model.MatrixDB.query("select * from snippet_snippet order by createAt desc limit {0}, {1}".format(id_from, count))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def get_snippet_count():
result = yield model.MatrixDB.get("select count(*) as count from snippet_snippet")
raise tornado.gen.Return(result.count)
@staticmethod
@tornado.gen.coroutine
def get_top_snippets_by_count(count):
result = yield model.MatrixDB.query("\
select ss.sid as sid, max(ss.author) as author, max(ss.createAt) as createAt, max(ss.content) as content, max(ss.code) as code, max(ss.pictures) as pictures, count(*) as count \
from snippet_snippet ss left join snippet_praise sp on ss.sid = sp.sid \
group by ss.sid \
order by count desc \
limit 0, {0}".format(count))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def get_snippets_by_uid_latest_count(user_id, count):
result = yield model.MatrixDB.query("select * from snippet_snippet where author = {0} order by createAt desc limit 0, {1}".format(user_id, count))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def get_snippet_by_sid(sid):
result = yield model.MatrixDB.get("select * from snippet_snippet where sid = {0}".format(sid))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def create_snippet(content, code, pictures, user_id):
row_id = yield model.MatrixDB.execute("insert into snippet_snippet (author, createAt, content, code, pictures) values ({0}, now(), '{1}', '{2}', '{3}')".format(user_id, escape_string(content), escape_string(code), escape_string(json.dumps(pictures))))
raise tornado.gen.Return(row_id)
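# Illustrative caller sketch (assumes a configured model.MatrixDB and a running
# tornado IOLoop; the coroutine name is hypothetical):
# @tornado.gen.coroutine
# def list_latest_snippets():
#     total = yield Snippet.get_snippet_count()
#     snippets = yield Snippet.get_snippets(0, min(total, 20))
#     raise tornado.gen.Return(snippets)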
| mit | -5,610,992,443,019,199,000 | 42.978723 | 259 | 0.670537 | false |
tudorian/eden | controllers/setup.py | 23 | 9689 | # -*- coding: utf-8 -*-
"""
Setup Tool
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
def index():
""" Show the index """
return dict()
# -----------------------------------------------------------------------------
def deployment():
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
#s3db.configure("setup_deployment", onvalidation=validate_deployment)
crud_form = S3SQLCustomForm("name",
"distro",
"remote_user",
"secret_key",
"access_key",
"private_key",
"webserver_type",
"db_type",
"db_password",
"db_type",
"db_password",
"repo_url",
"template",
S3SQLInlineComponent("server",
label = T("Server Role"),
fields = ["role", "host_ip", "hostname"],
),
S3SQLInlineComponent("instance",
label = T("Instance Type"),
fields = ["type", "url", "prepop_options"],
#filterby=dict(field = "type",
#options = ["prod", "demo"]
#),
multiple = False,
),
)
s3db.configure("setup_deployment", crud_form=crud_form)
def prep(r):
if r.method in ("create", None):
s3.scripts.append("/%s/static/scripts/S3/s3.setup.js" % appname)
if r.interactive:
if r.component and r.id:
# Set up the prepop options according to the template
prepop_options = s3db.setup_get_prepop_options(r.record.template)
db.setup_instance.prepop_options.requires = IS_IN_SET(prepop_options, multiple=True)
# No new servers once deployment is created
s3db.configure("setup_server",
insertable = False
)
# Check if no scheduler task is pending
itable = db.setup_instance
sctable = db.scheduler_task
query = (itable.deployment_id == r.id) & \
((sctable.status != "COMPLETED") & \
(sctable.status != "FAILED"))
rows = db(query).select(itable.scheduler_id,
join = itable.on(itable.scheduler_id == sctable.id)
)
if rows:
# Disable creation of new instances
s3db.configure("setup_instance",
insertable = False
)
elif r.component.name == "instance":
if r.method in (None, "create"):
# Remove deployed instances from drop down
itable = db.setup_instance
sctable = db.scheduler_task
query = (itable.deployment_id == r.id) & \
(sctable.status == "COMPLETED")
rows = db(query).select(itable.type,
join = itable.on(itable.scheduler_id == sctable.id)
)
types = {1: "prod", 2: "test", 3: "demo", 4: "dev"}
for row in rows:
del types[row.type]
itable.type.requires = IS_IN_SET(types)
return True
s3.prep = prep
def postp(r, output):
if r.component is None:
if r.method in (None, "read") and r.id:
# get scheduler status for the last queued task
itable = db.setup_instance
sctable = db.scheduler_task
query = (db.setup_instance.deployment_id == r.id)
row = db(query).select(sctable.id,
sctable.status,
join = itable.on(itable.scheduler_id==sctable.id),
orderby = itable.scheduler_id
).last()
item_append = output["item"][0].append
item_append(TR(TD(LABEL("Status"), _class="w2p_fl")))
item_append(TR(TD(row.status)))
if row.status == "FAILED":
resource = s3db.resource("scheduler_run")
task = db(resource.table.task_id == row.id).select().first()
item_append(TR(TD(LABEL("Traceback"), _class="w2p_fl")))
item_append(TR(TD(task.traceback)))
item_append(TR(TD(LABEL("Output"), _class="w2p_fl")))
item_append(TR(TD(task.run_output)))
elif r.component.name == "instance":
if r.method in (None, "read"):
s3.actions = [{"url": URL(c = module,
f = "management",
vars = {"instance": "[id]",
"type": "clean",
"deployment": r.id,
}
),
"_class": "action-btn",
"label": "Clean"
},
{"url": URL(c = module,
f = "management",
vars = {"instance": "[id]",
"type": "eden",
"deployment": r.id
}
),
"_class": "action-btn",
"label": "Upgrade Eden"
},
]
return output
s3.postp = postp
return s3_rest_controller(rheader=s3db.setup_rheader)
# -----------------------------------------------------------------------------
def management():
try:
_id = get_vars["instance"]
deployment_id = get_vars["deployment"]
_type = get_vars["type"]
except:
session.error = T("Record Not Found")
redirect(URL(c="setup", f="index"))
# Check if management task already running
exists = s3db.setup_management_exists(_type, _id, deployment_id)
if exists:
current.session.error = T("A management task is running for the instance")
redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
# Check if instance was successfully deployed
ttable = s3db.scheduler_task
itable = s3db.setup_instance
query = (ttable.status == "COMPLETED") & \
(itable.id == _id)
success = db(query).select(itable.id,
join=ttable.on(ttable.id == itable.scheduler_id),
limitby=(0, 1)).first()
if success:
# add the task to scheduler
current.s3task.schedule_task("setup_management",
args = [_type, _id, deployment_id],
timeout = 3600,
repeats = 1,
)
current.session.flash = T("Task queued in scheduler")
redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
else:
current.session.error = T("The instance was not successfully deployed")
redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
# -----------------------------------------------------------------------------
def prepop_setting():
if request.ajax:
template = request.post_vars.get("template")
return json.dumps(s3db.setup_get_prepop_options(template))
# -----------------------------------------------------------------------------
def refresh():
try:
id = request.args[0]
except:
current.session.error = T("Record Not Found")
redirect(URL(c="setup", f="index"))
result = s3db.setup_refresh(id)
if result["success"]:
current.session.flash = result["msg"]
redirect(URL(c="setup", f=result["f"], args=result["args"]))
else:
current.session.error = result["msg"]
redirect(URL(c="setup", f=result["f"], args=result["args"]))
# -----------------------------------------------------------------------------
def upgrade_status():
if request.ajax:
_id = request.post_vars.get("id")
status = s3db.setup_upgrade_status(_id)
if status:
return json.dumps(status)
| mit | 2,484,693,671,827,707,400 | 40.943723 | 100 | 0.397151 | false |
sdague/home-assistant | homeassistant/components/flexit/climate.py | 16 | 5180 | """Platform for Flexit AC units with CI66 Modbus adapter."""
import logging
from typing import List
from pyflexit.pyflexit import pyflexit
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.components.modbus.const import CONF_HUB, DEFAULT_HUB, MODBUS_DOMAIN
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_NAME,
CONF_SLAVE,
DEVICE_DEFAULT_NAME,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(CONF_SLAVE): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
}
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Flexit Platform."""
modbus_slave = config.get(CONF_SLAVE)
name = config.get(CONF_NAME)
hub = hass.data[MODBUS_DOMAIN][config.get(CONF_HUB)]
add_entities([Flexit(hub, modbus_slave, name)], True)
class Flexit(ClimateEntity):
"""Representation of a Flexit AC unit."""
def __init__(self, hub, modbus_slave, name):
"""Initialize the unit."""
self._hub = hub
self._name = name
self._slave = modbus_slave
self._target_temperature = None
self._current_temperature = None
self._current_fan_mode = None
self._current_operation = None
self._fan_modes = ["Off", "Low", "Medium", "High"]
self._current_operation = None
self._filter_hours = None
self._filter_alarm = None
self._heat_recovery = None
self._heater_enabled = False
self._heating = None
self._cooling = None
self._alarm = False
self.unit = pyflexit(hub, modbus_slave)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def update(self):
"""Update unit attributes."""
if not self.unit.update():
_LOGGER.warning("Modbus read failed")
self._target_temperature = self.unit.get_target_temp
self._current_temperature = self.unit.get_temp
self._current_fan_mode = self._fan_modes[self.unit.get_fan_speed]
self._filter_hours = self.unit.get_filter_hours
# Mechanical heat recovery, 0-100%
self._heat_recovery = self.unit.get_heat_recovery
# Heater active 0-100%
self._heating = self.unit.get_heating
# Cooling active 0-100%
self._cooling = self.unit.get_cooling
# Filter alarm 0/1
self._filter_alarm = self.unit.get_filter_alarm
# Heater enabled or not. Does not mean it's necessarily heating
self._heater_enabled = self.unit.get_heater_enabled
# Current operation mode
self._current_operation = self.unit.get_operation
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
"filter_hours": self._filter_hours,
"filter_alarm": self._filter_alarm,
"heat_recovery": self._heat_recovery,
"heating": self._heating,
"heater_enabled": self._heater_enabled,
"cooling": self._cooling,
}
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return [HVAC_MODE_COOL]
@property
def fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._fan_modes
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
self.unit.set_temp(self._target_temperature)
def set_fan_mode(self, fan_mode):
"""Set new fan mode."""
self.unit.set_fan_speed(self._fan_modes.index(fan_mode))
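# Illustrative configuration.yaml entry for this platform (values are examples;
# `slave` must match the CI66 Modbus address and `hub` falls back to the default
# Modbus hub when omitted):
# climate:
#   - platform: flexit
#     slave: 21
#     name: Flexit ventilation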
| apache-2.0 | 6,408,234,200,811,458,000 | 30.779141 | 86 | 0.630502 | false |
chemelnucfin/tensorflow | tensorflow/python/keras/distribute/multi_worker_callback_test.py | 2 | 23430 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Keras multi worker callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import threading
from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import keras
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.platform import test
def get_strategy_object(strategy_cls):
if strategy_cls == mirrored_strategy.MirroredStrategy:
return strategy_cls(mirrored_strategy.all_local_devices())
else:
# CollectiveAllReduceStrategy and ParameterServerStrategy.
return strategy_cls()
def generate_callback_test_function(custom_callable):
"""Generic template for callback tests using mnist synthetic dataset."""
@combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[collective_strategy.CollectiveAllReduceStrategy],
required_gpus=[0, 1],
file_format=['h5', 'tf']))
def test_template(self, strategy_cls, file_format):
num_workers = 2
num_epoch = 2
cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
self._barrier = dc._Barrier(2)
def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument
"""Simulates an Independent Worker inside of a thread."""
with test.mock.patch.object(dc, '_run_std_server',
self._make_mock_run_std_server()):
strategy = get_strategy_object(strategy_cls)
batch_size = 64
steps = 2
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
with strategy.scope():
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
custom_callable(
model,
self,
train_ds,
num_epoch,
steps,
strategy,
saving_filepath=kwargs['saving_filepath'],
barrier=kwargs['barrier'],
threading_local=kwargs['threading_local'])
# Pass saving_filepath from the parent thread to ensure every worker has the
    # same filepath to save.
saving_filepath = os.path.join(self.get_temp_dir(),
'checkpoint.' + file_format)
barrier = dc._Barrier(2)
threading_local = threading.local()
threads = self.run_multiple_tasks_in_threads(
_independent_worker_fn,
cluster_spec,
saving_filepath=saving_filepath,
barrier=barrier,
threading_local=threading_local)
self.assertFalse(training_state.checkpoint_exists(saving_filepath))
threads_to_join = []
strategy = get_strategy_object(strategy_cls)
if strategy.extended.experimental_between_graph:
for ts in threads.values():
threads_to_join.extend(ts)
else:
threads_to_join = [threads['worker'][0]]
self.join_independent_workers(threads_to_join)
return test_template
class KerasMultiWorkerCallbackTest(test_base.IndependentWorkerTestBase,
parameterized.TestCase):
# The callables of the actual testing content to be run go below.
@staticmethod
def callableForTestChiefOnlyCallback(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
class ChiefOnly(keras.callbacks.Callback):
def __init__(self):
self._chief_worker_only = True
self.filtered_correctly = True
def on_train_begin(self, logs):
if not multi_worker_util.is_chief():
# Non-chief workers shouldn't run this callback.
self.filtered_correctly = False
cb = ChiefOnly()
model.fit(
x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[cb])
test_obj.assertTrue(cb.filtered_correctly)
@staticmethod
def callableForTestModelCheckpointSavesOnChiefButNotOtherwise(
model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath,
**kwargs):
extension = os.path.splitext(saving_filepath)[1]
# Incorporate type/index information and thread id in saving_filepath to
# ensure every worker has a unique path. Note that in normal use case the
# saving_filepath will be the same for all workers, but we use different
# ones here just to test out chief saves checkpoint but non-chief doesn't.
saving_filepath = os.path.join(
test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' %
(test_base.get_task_type(), test_base.get_task_index(), extension))
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)])
# If it's chief, the model should be saved; if not, the model shouldn't.
test_obj.assertEqual(
training_state.checkpoint_exists(saving_filepath), test_base.is_chief())
@staticmethod
def initialFitting(test_obj, model, train_ds, num_epoch, steps,
saving_filepath):
# The saving_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True)
])
    # The saving_filepath should exist after fitting with callback. Both chief
    # and non-chief workers should see that it exists (it was saved only by
    # the chief).
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath))
history_after_one_more_epoch = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
# The saving_filepath should continue to exist (if it did) after fitting
# without callback.
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath))
return saving_filepath, history_after_one_more_epoch
@staticmethod
def callableForTestLoadWeightFromModelCheckpoint(model, test_obj, train_ds,
num_epoch, steps, strategy,
saving_filepath, **kwargs):
filepaths = []
real_mkstemp = tempfile.mkstemp
def mocked_mkstemp():
# Only non-chief should call tempfile.mkstemp() inside fit() in sync
# training.
assert not test_base.is_chief()
file_handle, temp_file_name = real_mkstemp()
extension = os.path.splitext(saving_filepath)[1]
temp_filepath = temp_file_name + extension
filepaths.append(temp_filepath)
return file_handle, temp_file_name
# Mock tempfile.mkstemp() so the filepaths can be stored and verified later.
with test.mock.patch.object(tempfile, 'mkstemp', mocked_mkstemp):
saving_filepath, history_after_one_more_epoch = \
KerasMultiWorkerCallbackTest.initialFitting(
test_obj, model, train_ds, num_epoch, steps, saving_filepath)
with strategy.scope():
model.load_weights(saving_filepath)
history_after_loading_weight_and_one_more_epoch = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
test_obj.assertAllClose(
history_after_one_more_epoch.history,
history_after_loading_weight_and_one_more_epoch.history,
rtol=5e-5)
# Verify the temp files are indeed removed (no trace left behind).
for filepath in filepaths:
assert not training_state.checkpoint_exists(filepath)
@staticmethod
def callableForTestModelRestoreCallback(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
saving_filepath, history_after_one_more_epoch = \
KerasMultiWorkerCallbackTest.initialFitting(
test_obj, model, train_ds, num_epoch, steps, saving_filepath)
# The model should get restored to the weights previously saved, by
# adding a ModelCheckpoint callback (which results in a
# _ModelRestoreCallback being added), with load_weights_on_restart=True.
history_after_model_restoring_and_one_more_epoch = model.fit(
x=train_ds,
epochs=1,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=True,
load_weights_on_restart=True)
])
    # Asserting that the histories one epoch after initial fitting and one
    # epoch after restoring are close.
test_obj.assertAllClose(
history_after_one_more_epoch.history,
history_after_model_restoring_and_one_more_epoch.history,
rtol=5e-5)
history_one_more_epoch_without_model_restoring = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
# Ensuring training for another epoch gives different result.
test_obj.assertNotAllClose(
history_after_model_restoring_and_one_more_epoch.history,
history_one_more_epoch_without_model_restoring.history,
rtol=5e-5)
@staticmethod
def callableForTestBackupModelRemoved(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
# `barrier` object needs to be passed in from parent
# thread so both threads refer to the same object.
barrier = kwargs['barrier']
num_epoch = 3
# Testing the backup filepath `multi_worker_training_state` uses.
_, backup_filepath = training_state._get_backup_filepath(saving_filepath)
# The backup_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(backup_filepath))
# Callback to verify that the backup file exists in the middle of training.
class BackupFilepathVerifyingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch > 1:
# Asserting that after the first two epochs, the backup file should
# exist.
test_obj.assertTrue(training_state.checkpoint_exists(backup_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True),
BackupFilepathVerifyingCallback()
])
# Sync on the two threads so we make sure the backup file is removed before
# we move on.
barrier.wait()
# The back up file should not exist at successful exit of `model.fit()`.
test_obj.assertFalse(training_state.checkpoint_exists(backup_filepath))
@staticmethod
def callableForTestBackupModelNotRemovedIfInterrupted(model, test_obj,
train_ds, num_epoch,
steps, strategy,
saving_filepath,
**kwargs):
# `barrier` object needs to be passed in from parent
# thread so both threads refer to the same object.
barrier = kwargs['barrier']
num_epoch = 4
# Testing the backup filepath `multi_worker_training_state` uses.
_, backup_filepath = training_state._get_backup_filepath(saving_filepath)
# The backup_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(backup_filepath))
# Callback to interrupt in the middle of training.
class InterruptingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch == 2:
raise RuntimeError('Interrupting!')
try:
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True),
InterruptingCallback()
])
except RuntimeError as e:
      if 'Interrupting!' not in str(e):
raise
# Sync on the two threads.
barrier.wait()
# The back up file should exist after interruption of `model.fit()`.
test_obj.assertTrue(training_state.checkpoint_exists(backup_filepath))
@staticmethod
def callableForTestUnmatchedModelFile(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
# The saving_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True)
])
(train_ds, _), (_, _) = testing_utils.get_test_data(
train_samples=10, test_samples=10, input_shape=(3,), num_classes=2)
# Switch to a model of different structure.
with strategy.scope():
model = keras.models.Sequential()
model.add(keras.layers.Dense(5, input_dim=3, activation='relu'))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath))
if saving_filepath.endswith('.tf'):
test_obj.skipTest('Loading mismatched TF checkpoint would cause Fatal '
'Python error: Aborted. Skipping.')
# Unmatched format. Should raise ValueError.
with test_obj.assertRaisesRegexp(ValueError, 'Error loading file from'):
model.fit(
x=train_ds,
epochs=num_epoch,
batch_size=8,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=True,
load_weights_on_restart=True)
])
@staticmethod
def callableForTestReduceLROnPlateau(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
cbks = [
callbacks.ReduceLROnPlateau(
monitor='loss',
factor=0.1,
min_delta=1,
patience=1,
cooldown=5,
verbose=1)
]
# It is expected that the learning rate would drop by `factor` within
# 3 epochs with `min_delta=1`.
model.fit(x=train_ds, epochs=3, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.0001, atol=1e-8)
# It is expected that the learning rate would drop by another `factor`
# within 3 epochs with `min_delta=1`.
model.fit(x=train_ds, epochs=3, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.00001, atol=1e-8)
@staticmethod
def callableForTestEarlyStopping(model, test_obj, train_ds, num_epoch, steps,
strategy, saving_filepath, **kwargs):
class EpochCounterCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.last_epoch = epoch
epoch_counter_cbk = EpochCounterCallback()
cbks = [
callbacks.EarlyStopping(
monitor='loss', min_delta=0.05, patience=1, verbose=1),
epoch_counter_cbk
]
# Empirically, it is expected that `model.fit()` would terminate around the
    # 22nd epoch. Asserting that it should have been stopped before the 50th
# epoch to avoid flakiness and be more predictable.
model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
@staticmethod
def callableForTestLearningRateScheduler(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
cbks = [
callbacks.LearningRateScheduler(
schedule=lambda x: 1. / (1. + x), verbose=1)
]
# It is expected that with `epochs=2`, the learning rate would drop to
# 1 / (1 + 2) = 0.5.
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.5, atol=1e-8)
# It is expected that with `epochs=4`, the learning rate would drop to
# 1 / (1 + 4) = 0.25.
model.fit(x=train_ds, epochs=4, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.25, atol=1e-8)
# pylint: disable=g-doc-args
@staticmethod
def callableForTestIntermediateDirForFTAreRemoved(model, test_obj, train_ds,
num_epoch, steps, strategy,
saving_filepath, **kwargs):
"""Testing that the temporary directory are removed.
Some temporary directories are created for the purpose of fault tolerance.
This test ensures that such directories should have been removed at the time
`model.fit()` finishes successfully.
"""
# `threading_local` and `barrier` objects have to be passed in from parent
# thread so both threads refer to the same object.
threading_local = kwargs['threading_local']
barrier = kwargs['barrier']
    # Each of the two threads will have its own copy of the
    # `temp_dirs_supposed_to_be_removed` list.
threading_local.temp_dirs_supposed_to_be_removed = []
callbacks_list = [
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=True,
load_weights_on_restart=True),
]
# Keep the references to the real function objects.
real_os_path_join = os.path.join
real_tempfile_mkdtemp = tempfile.mkdtemp
# Make a `os.path.join` wrapper, which will be patched onto the real
# function, so the temporary directories can be tracked.
def wrapper_os_path_join(path, *paths):
join_result = real_os_path_join(path, *paths)
if len(paths) == 1 and paths[0] == 'backup':
threading_local.temp_dirs_supposed_to_be_removed.append(join_result)
return join_result
# Likewise for `tempfile.mkdtemp`.
def wrapper_tempfile_mkdtemp():
result = real_tempfile_mkdtemp()
threading_local.temp_dirs_supposed_to_be_removed.append(result)
return result
# Now the two threads must sync here: if they are out of sync, one thread
# can go ahead and patch `os.path.join` while the other has not even
# assigned the real `os.path.join` to `real_os_path_join`. If this happened,
# the "real" `os.path.join` the slower thread would see is actually the
# wrapper of the other.
barrier.wait()
# Note that `os.path.join` will respect the second patch (there are two
# patches because of the two threads). Both threads will refer to the same
# copy of `wrapper_os_path_join` because of the `barrier` preceding
# `model.fit()`. Likewise for `wrapper_tempfile_mkdtemp`.
os.path.join = wrapper_os_path_join
tempfile.mkdtemp = wrapper_tempfile_mkdtemp
barrier.wait()
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=callbacks_list)
# Sync before un-patching to prevent either thread from accessing the real
# functions. Also to make sure `model.fit()` is done on both threads (so we
# can safely assert the directories are removed).
barrier.wait()
os.path.join = real_os_path_join
tempfile.mkdtemp = real_tempfile_mkdtemp
    # There should be directory names recorded that are supposed to be removed.
test_obj.assertTrue(threading_local.temp_dirs_supposed_to_be_removed)
for temp_dir_supposed_to_be_removed in (
threading_local.temp_dirs_supposed_to_be_removed):
# They should have been removed and thus don't exist.
test_obj.assertFalse(os.path.exists(temp_dir_supposed_to_be_removed))
# The actual testing methods go here.
test_chief_only_callback = generate_callback_test_function(
callableForTestChiefOnlyCallback.__func__)
test_model_checkpoint_saves_on_chief_but_not_otherwise = \
generate_callback_test_function(
callableForTestModelCheckpointSavesOnChiefButNotOtherwise.__func__)
test_load_weight_from_model_checkpoint = generate_callback_test_function(
callableForTestLoadWeightFromModelCheckpoint.__func__)
test_model_restore_callback = generate_callback_test_function(
callableForTestModelRestoreCallback.__func__)
test_unmatched_model_file = generate_callback_test_function(
callableForTestUnmatchedModelFile.__func__)
test_reduce_lr_on_plateau = generate_callback_test_function(
callableForTestReduceLROnPlateau.__func__)
test_early_stopping = generate_callback_test_function(
callableForTestEarlyStopping.__func__)
test_learning_rate_scheduler = generate_callback_test_function(
callableForTestLearningRateScheduler.__func__)
test_intermediate_dir_for_ft_are_removed = generate_callback_test_function(
callableForTestIntermediateDirForFTAreRemoved.__func__)
test_backup_model_removed = generate_callback_test_function(
callableForTestBackupModelRemoved.__func__)
test_backup_model_not_removed_if_interrupted = \
generate_callback_test_function(
callableForTestBackupModelNotRemovedIfInterrupted.__func__)
if __name__ == '__main__':
with test.mock.patch.object(sys, 'exit', os._exit):
test.main()
| apache-2.0 | -4,112,443,133,832,969,700 | 38.510961 | 94 | 0.654673 | false |
VWApplications/VWCourses | accounts/migrations/0004_auto_20170207_0350.py | 1 | 2273 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 03:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20170203_0220'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=30, verbose_name='País')),
('city', models.CharField(max_length=30, verbose_name='Cidade')),
('state', models.CharField(max_length=30, verbose_name='Estado')),
('complement', models.CharField(blank=True, max_length=100, verbose_name='Complemento')),
],
options={
'verbose_name': 'Endereço',
'verbose_name_plural': 'Endereços',
},
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DDD', models.IntegerField(verbose_name='DDD')),
('number', models.IntegerField(verbose_name='Número')),
],
options={
'verbose_name': 'Telefone',
'verbose_name_plural': 'Telefones',
},
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=30, verbose_name='Nome'),
),
migrations.AddField(
model_name='user',
name='address',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='accounts.Address', verbose_name='Endereço'),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='phone',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='accounts.Phone', verbose_name='Telefone'),
preserve_default=False,
),
]
| mpl-2.0 | -1,496,603,027,603,150,800 | 37.440678 | 143 | 0.554674 | false |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/tornado/template.py | 18 | 31175 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% finally %}...{% else %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from tornado import escape
from tornado.log import app_log
from tornado.util import bytes_type, ObjectDict, exec_in, unicode_type
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
# note that the constructor's signature is not extracted with
# autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None, autoescape=_UNSET):
self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \
name.endswith(".js")
if autoescape is not _UNSET:
self.autoescape = autoescape
elif loader:
self.autoescape = loader.autoescape
else:
self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string))
self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader, compress_whitespace)
self.loader = loader
try:
# Under python2.5, the fake filename used here must match
# the module name used in __name__ below.
# The dont_inherit flag prevents template.py's future imports
# from being applied to the generated code.
self.compiled = compile(
escape.to_unicode(self.code),
"%s.generated.py" % self.name.replace('.', '_'),
"exec", dont_inherit=True)
except Exception:
formatted_code = _format_code(self.code).rstrip()
app_log.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": escape.xhtml_escape,
"xhtml_escape": escape.xhtml_escape,
"url_escape": escape.url_escape,
"json_encode": escape.json_encode,
"squeeze": escape.squeeze,
"linkify": escape.linkify,
"datetime": datetime,
"_tt_utf8": escape.utf8, # for internal use
"_tt_string_types": (unicode_type, bytes_type),
# __name__ and __loader__ allow the traceback mechanism to find
# the generated source code.
"__name__": self.name.replace('.', '_'),
"__loader__": ObjectDict(get_source=lambda name: self.code),
}
namespace.update(self.namespace)
namespace.update(kwargs)
exec_in(self.compiled, namespace)
execute = namespace["_tt_execute"]
# Clear the traceback module's cache of source data now that
# we've generated a new template (mainly for this module's
# unittests, where different tests reuse the same name).
linecache.clearcache()
return execute()
def _generate_python(self, loader, compress_whitespace):
buffer = StringIO()
try:
# named_blocks maps from names to _NamedBlock objects
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template,
compress_whitespace)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
class BaseLoader(object):
"""Base class for template loaders.
You must use a template loader to use template constructs like
``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time.
"""
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
"""``autoescape`` must be either None or a string naming a function
in the template namespace, such as "xhtml_escape".
"""
self.autoescape = autoescape
self.namespace = namespace or {}
self.templates = {}
# self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or
# `extends`. Note that thanks to the GIL this code would be safe
# even without the lock, but could lead to wasted work as multiple
# threads tried to compile the same template simultaneously.
self.lock = threading.RLock()
def reset(self):
"""Resets the cache of compiled templates."""
with self.lock:
self.templates = {}
def resolve_path(self, name, parent_path=None):
"""Converts a possibly-relative path to absolute (used internally)."""
raise NotImplementedError()
def load(self, name, parent_path=None):
"""Loads a template."""
name = self.resolve_path(name, parent_path=parent_path)
with self.lock:
if name not in self.templates:
self.templates[name] = self._create_template(name)
return self.templates[name]
def _create_template(self, name):
raise NotImplementedError()
class Loader(BaseLoader):
"""A template loader that loads from a single root directory.
"""
def __init__(self, root_directory, **kwargs):
super(Loader, self).__init__(**kwargs)
self.root = os.path.abspath(root_directory)
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def _create_template(self, name):
path = os.path.join(self.root, name)
f = open(path, "rb")
template = Template(f.read(), name=name, loader=self)
f.close()
return template
class DictLoader(BaseLoader):
"""A template loader that loads from a dictionary."""
def __init__(self, dict, **kwargs):
super(DictLoader, self).__init__(**kwargs)
self.dict = dict
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
file_dir = posixpath.dirname(parent_path)
name = posixpath.normpath(posixpath.join(file_dir, name))
return name
def _create_template(self, name):
return Template(self.dict[name], name=name, loader=self)
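# Illustrative sketch (added for clarity, not part of the original module):
# loaders cache compiled templates and resolve {% extends %} / {% include %}
# names, so template inheritance works across entries of the same loader.
def _example_dict_loader_usage():
    loader = DictLoader({
        "base.html": "<title>{% block title %}default{% end %}</title>",
        "page.html": '{% extends "base.html" %}'
                     '{% block title %}{{ name }}{% end %}',
    })
    # load() compiles (and caches) the template; generate() renders it.
    return loader.load("page.html").generate(name="tornado")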
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, template, body):
self.template = template
self.body = body
self.line = 0
def generate(self, writer):
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body, template, line):
self.name = name
self.body = body
self.template = template
self.line = line
def each_child(self):
return (self.body,)
def generate(self, writer):
block = writer.named_blocks[self.name]
with writer.include(block.template, self.line):
block.body.generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader, line):
self.name = name
self.template_name = reader.name
self.line = line
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
with writer.include(included, self.line):
included.file.body.generate(writer)
class _ApplyBlock(_Node):
def __init__(self, method, line, body=None):
self.method = method
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
self.method, method_name), self.line)
class _ControlBlock(_Node):
def __init__(self, statement, line, body=None):
self.statement = statement
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement, self.line)
with writer.indent():
self.body.generate(writer)
# Just in case the body was empty
writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
# In case the previous block was empty
writer.write_line("pass", self.line)
writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
writer.write_line(self.statement, self.line)
class _Expression(_Node):
def __init__(self, expression, line, raw=False):
self.expression = expression
self.line = line
self.raw = raw
def generate(self, writer):
writer.write_line("_tt_tmp = %s" % self.expression, self.line)
writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
" _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
if not self.raw and writer.current_template.autoescape is not None:
# In python3 functions like xhtml_escape return unicode,
# so we have to convert to utf8 again.
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
writer.current_template.autoescape, self.line)
writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
def __init__(self, expression, line):
super(_Module, self).__init__("_tt_modules." + expression, line,
raw=True)
class _Text(_Node):
def __init__(self, value, line):
self.value = value
self.line = line
def generate(self, writer):
value = self.value
# Compress lots of white space to a single character. If the whitespace
# breaks a line, have it continue to break a line, but just with a
# single \n character
if writer.compress_whitespace and "<pre>" not in value:
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
"""Raised for template syntax errors."""
pass
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template,
compress_whitespace):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0
self.include_stack = []
self._indent = 0
def indent_size(self):
return self._indent
def indent(self):
class Indenter(object):
def __enter__(_):
self._indent += 1
return self
def __exit__(_, *args):
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template, line):
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate(object):
def __enter__(_):
return self
def __exit__(_, *args):
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(self, line, line_number, indent=None):
if indent is None:
indent = self._indent
line_comment = ' # %s:%d' % (self.current_template.name, line_number)
if self.include_stack:
ancestors = ["%s:%d" % (tmpl.name, lineno)
for (tmpl, lineno) in self.include_stack]
line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
raise ParseError("Missing {%% end %%} block for %s" %
in_block)
body.chunks.append(_Text(reader.consume(), reader.line))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%", "#"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
# like latex where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line))
start_brace = reader.consume(2)
line = reader.line
# Template directives may be escaped as "{{!" or "{%!".
# In this case output the braces and consume the "!".
# This is especially useful in conjunction with jquery templates,
# which also use double braces.
if reader.remaining() and reader[0] == "!":
reader.consume(1)
body.chunks.append(_Text(start_brace, line))
continue
# Comment
if start_brace == "{#":
end = reader.find("#}")
if end == -1:
raise ParseError("Missing end expression #} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
continue
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1:
raise ParseError("Missing end expression }} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty expression on line %d" % line)
body.chunks.append(_Expression(contents, line))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1:
raise ParseError("Missing end block %%} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while", "try"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
raise ParseError("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line))
continue
# End tag
elif operator == "end":
if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line)
return body
elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "raw", "module"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("extends missing file path on line %d" % line)
block = _ExtendsBlock(suffix)
elif operator in ("import", "from"):
if not suffix:
raise ParseError("import missing statement on line %d" % line)
block = _Statement(contents, line)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("include missing file path on line %d" % line)
block = _IncludeBlock(suffix, reader, line)
elif operator == "set":
if not suffix:
raise ParseError("set missing statement on line %d" % line)
block = _Statement(suffix, line)
elif operator == "autoescape":
fn = suffix.strip()
if fn == "None":
fn = None
template.autoescape = fn
continue
elif operator == "raw":
block = _Expression(suffix, line, raw=True)
elif operator == "module":
block = _Module(suffix, line)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
if operator in ("for", "while"):
block_body = _parse(reader, template, operator, operator)
elif operator == "apply":
# apply creates a nested function so syntactically it's not
# in the loop.
block_body = _parse(reader, template, operator, None)
else:
block_body = _parse(reader, template, operator, in_loop)
if operator == "apply":
if not suffix:
raise ParseError("apply missing method name on line %d" % line)
block = _ApplyBlock(suffix, line, block_body)
elif operator == "block":
if not suffix:
raise ParseError("block missing name on line %d" % line)
block = _NamedBlock(suffix, block_body, template, line)
else:
block = _ControlBlock(contents, line, block_body)
body.chunks.append(block)
continue
elif operator in ("break", "continue"):
if not in_loop:
raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line))
continue
else:
raise ParseError("unknown operator: %r" % operator)
| gpl-3.0 | 1,744,600,793,663,657,500 | 34.998845 | 98 | 0.577129 | false |
beezee/GAE-Django-base-app | django/core/management/commands/flush.py | 249 | 3437 | from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlflush`` on the current database."
def handle_noargs(self, **options):
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError:
pass
sql_list = sql_flush(self.style, connection, only_django=True)
if interactive:
confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed(using=db)
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
transaction.commit_unless_managed(using=db)
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
# sync'd from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)
])
emit_post_sync_signal(set(all_models), verbosity, interactive, db)
# Reinstall the initial_data fixture.
kwargs = options.copy()
kwargs['database'] = db
call_command('loaddata', 'initial_data', **kwargs)
else:
print "Flush cancelled."
| bsd-3-clause | -3,501,974,911,753,701,400 | 40.914634 | 103 | 0.627 | false |
edulramirez/nova | nova/api/openstack/compute/server_diagnostics.py | 33 | 2605 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
ALIAS = "os-server-diagnostics"
authorize = extensions.os_compute_authorizer(ALIAS)
class ServerDiagnosticsController(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
@extensions.expected_errors((404, 409, 501))
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
try:
# NOTE(gmann): To make V21 same as V2 API, this method will call
# 'get_diagnostics' instead of 'get_instance_diagnostics'.
# In future, 'get_instance_diagnostics' needs to be called to
# provide VM diagnostics in a defined format for all driver.
# BP - https://blueprints.launchpad.net/nova/+spec/v3-diagnostics.
return self.compute_api.get_diagnostics(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'get_diagnostics', server_id)
except NotImplementedError:
common.raise_feature_not_supported()
class ServerDiagnostics(extensions.V21APIExtensionBase):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = ALIAS
version = 1
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
resources = [
extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 | 2,786,218,885,550,157,000 | 37.308824 | 78 | 0.669866 | false |
bgxavier/neutron | neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py | 46 | 1411 | # Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import config
from neutron.common import config as common_config
from neutron.i18n import _LI
from neutron.plugins.hyperv.agent import config as hyperv_config
from neutron.plugins.hyperv.agent import l2_agent
LOG = logging.getLogger(__name__)
def register_options():
config.register_agent_state_opts_helper(cfg.CONF)
cfg.CONF.register_opts(hyperv_config.HYPERV_AGENT_OPTS, "AGENT")
def main():
register_options()
common_config.init(sys.argv[1:])
config.setup_logging()
hyperv_agent = l2_agent.HyperVNeutronAgent()
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
hyperv_agent.daemon_loop()
| apache-2.0 | 2,026,935,660,131,399,700 | 31.068182 | 78 | 0.737775 | false |
arista-eosplus/ansible | lib/ansible/modules/network/aos/aos_blueprint.py | 78 | 9017 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_blueprint
author: [email protected] (@jeremyschulman)
version_added: "2.3"
short_description: Manage AOS blueprint instance
description:
  - Apstra AOS Blueprint module lets you manage your Blueprints easily. You can
    create and delete Blueprints by name or ID. You can also use it to retrieve
    all data from a blueprint. This module is idempotent
    and supports the I(check) mode. It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Blueprint to manage.
Only one of I(name) or I(id) can be set.
id:
description:
- AOS Id of the IP Pool to manage (can't be used to create a new IP Pool).
Only one of I(name) or I(id) can be set.
state:
description:
- Indicate what is the expected state of the Blueprint.
choices: ['present', 'absent', 'build-ready']
default: present
timeout:
description:
- When I(state=build-ready), this timeout identifies timeout in seconds to wait before
declaring a failure.
default: 5
template:
description:
- When creating a blueprint, this value identifies, by name, an existing engineering
design template within the AOS-server.
reference_arch:
description:
- When creating a blueprint, this value identifies a known AOS reference
architecture value. I(Refer to AOS-server documentation for available values).
'''
EXAMPLES = '''
- name: Creating blueprint
aos_blueprint:
session: "{{ aos_session }}"
name: "my-blueprint"
template: "my-template"
reference_arch: two_stage_l3clos
state: present
- name: Access a blueprint and get content
aos_blueprint:
session: "{{ aos_session }}"
name: "{{ blueprint_name }}"
template: "{{ blueprint_template }}"
state: present
register: bp
- name: Delete a blueprint
aos_blueprint:
session: "{{ aos_session }}"
name: "my-blueprint"
state: absent
- name: Await blueprint build-ready, and obtain contents
aos_blueprint:
session: "{{ aos_session }}"
name: "{{ blueprint_name }}"
state: build-ready
register: bp
'''
RETURNS = '''
name:
description: Name of the Blueprint
returned: always
type: str
sample: My-Blueprint
id:
description: AOS unique ID assigned to the Blueprint
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Information about the Blueprint
returned: always
type: dict
sample: {'...'}
contents:
description: Blueprint contents data-dictionary
returned: always
type: dict
sample: { ... }
build_errors:
description: When state='build-ready', and build errors exist, this contains list of errors
returned: only when build-ready returns fail
type: list
sample: [{...}, {...}]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, check_aos_version, find_collection_item
from ansible.module_utils.pycompat24 import get_exception
def create_blueprint(module, aos, name):
margs = module.params
try:
template_id = aos.DesignTemplates[margs['template']].id
# Create a new Object based on the name
blueprint = aos.Blueprints[name]
blueprint.create(template_id, reference_arch=margs['reference_arch'])
except:
exc = get_exception()
if 'UNPROCESSABLE ENTITY' in exc.message:
msg = 'likely missing dependencies'
else:
msg = exc.message
module.fail_json(msg="Unable to create blueprint: %s" % exc.message)
return blueprint
def ensure_absent(module, aos, blueprint):
if blueprint.exists is False:
module.exit_json(changed=False)
else:
if not module.check_mode:
try:
blueprint.delete()
except:
exc = get_exception()
module.fail_json(msg='Unable to delete blueprint, %s' % exc.message)
module.exit_json(changed=True,
id=blueprint.id,
name=blueprint.name)
def ensure_present(module, aos, blueprint):
margs = module.params
if blueprint.exists:
module.exit_json(changed=False,
id=blueprint.id,
name=blueprint.name,
value=blueprint.value,
contents=blueprint.contents)
else:
# Check if template is defined and is valid
if margs['template'] is None:
module.fail_json(msg="You must define a 'template' name to create a new blueprint, currently missing")
elif aos.DesignTemplates.find(label=margs['template']) is None:
module.fail_json(msg="You must define a Valid 'template' name to create a new blueprint, %s is not valid" % margs['template'])
# Check if reference_arch
if margs['reference_arch'] is None:
module.fail_json(msg="You must define a 'reference_arch' to create a new blueprint, currently missing")
if not module.check_mode:
blueprint = create_blueprint(module, aos, margs['name'])
module.exit_json(changed=True,
id=blueprint.id,
name=blueprint.name,
value=blueprint.value,
contents=blueprint.contents)
else:
module.exit_json(changed=True,
name=margs['name'])
def ensure_build_ready(module, aos, blueprint):
margs = module.params
if not blueprint.exists:
module.fail_json(msg='blueprint %s does not exist' % blueprint.name)
if blueprint.await_build_ready(timeout=margs['timeout']*1000):
module.exit_json(contents=blueprint.contents)
else:
        module.fail_json(msg='blueprint %s has build errors' % blueprint.name,
                         build_errors=blueprint.build_errors)
def aos_blueprint(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
try:
my_blueprint = find_collection_item(aos.Blueprints,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
ensure_absent(module, aos, my_blueprint)
elif margs['state'] == 'present':
ensure_present(module, aos, my_blueprint)
elif margs['state'] == 'build-ready':
ensure_build_ready(module, aos, my_blueprint)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
            id=dict(required=False),
state=dict(choices=[
'present', 'absent', 'build-ready'],
default='present'),
timeout=dict(type="int", default=5),
template=dict(required=False),
reference_arch=dict(required=False)
),
mutually_exclusive = [('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
aos_blueprint(module)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,426,815,031,446,339,000 | 29.462838 | 138 | 0.608961 | false |
PokeHunterProject/pogom-updated | pogom/pgoapi/protos/POGOProtos/Networking/Responses/SetBuddyPokemonResponse_pb2.py | 6 | 4585 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/SetBuddyPokemonResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data import BuddyPokemon_pb2 as POGOProtos_dot_Data_dot_BuddyPokemon__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/SetBuddyPokemonResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n=POGOProtos/Networking/Responses/SetBuddyPokemonResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a\"POGOProtos/Data/BuddyPokemon.proto\"\x95\x02\n\x17SetBuddyPokemonResponse\x12O\n\x06result\x18\x01 \x01(\x0e\x32?.POGOProtos.Networking.Responses.SetBuddyPokemonResponse.Result\x12\x34\n\rupdated_buddy\x18\x02 \x01(\x0b\x32\x1d.POGOProtos.Data.BuddyPokemon\"s\n\x06Result\x12\t\n\x05UNEST\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x1a\n\x16\x45RROR_POKEMON_DEPLOYED\x10\x02\x12\x1b\n\x17\x45RROR_POKEMON_NOT_OWNED\x10\x03\x12\x18\n\x14\x45RROR_POKEMON_IS_EGG\x10\x04\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_BuddyPokemon__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETBUDDYPOKEMONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNEST', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_DEPLOYED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_NOT_OWNED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_IS_EGG', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=297,
serialized_end=412,
)
_sym_db.RegisterEnumDescriptor(_SETBUDDYPOKEMONRESPONSE_RESULT)
_SETBUDDYPOKEMONRESPONSE = _descriptor.Descriptor(
name='SetBuddyPokemonResponse',
full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='updated_buddy', full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse.updated_buddy', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SETBUDDYPOKEMONRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=412,
)
_SETBUDDYPOKEMONRESPONSE.fields_by_name['result'].enum_type = _SETBUDDYPOKEMONRESPONSE_RESULT
_SETBUDDYPOKEMONRESPONSE.fields_by_name['updated_buddy'].message_type = POGOProtos_dot_Data_dot_BuddyPokemon__pb2._BUDDYPOKEMON
_SETBUDDYPOKEMONRESPONSE_RESULT.containing_type = _SETBUDDYPOKEMONRESPONSE
DESCRIPTOR.message_types_by_name['SetBuddyPokemonResponse'] = _SETBUDDYPOKEMONRESPONSE
SetBuddyPokemonResponse = _reflection.GeneratedProtocolMessageType('SetBuddyPokemonResponse', (_message.Message,), dict(
DESCRIPTOR = _SETBUDDYPOKEMONRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.SetBuddyPokemonResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.SetBuddyPokemonResponse)
))
_sym_db.RegisterMessage(SetBuddyPokemonResponse)
# @@protoc_insertion_point(module_scope)
| mit | -108,050,510,883,710,910 | 38.525862 | 610 | 0.756598 | false |
mrshelly/openerp71313 | openerp/tools/float_utils.py | 151 | 9267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None):
"""Return ``value`` rounded to ``precision_digits``
decimal digits, minimizing IEEE-754 floating point representation
errors, and applying HALF-UP (away from zero) tie-breaking rule.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
    # Due to IEEE 754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
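# Illustrative sketch (added for clarity, not part of the original module): the
# epsilon correction above is what makes HALF-UP behave as intended for values
# whose binary representation falls just under the tie, e.g. 2.675 is stored as
# 2.67499999999999982... and would otherwise round down to 2.67.
def _example_float_round():
    assert float_repr(float_round(2.675, precision_digits=2), 2) == '2.68'
    assert float_repr(float_round(-2.675, precision_digits=2), 2) == '-2.68'
    # Rounding to an arbitrary step (e.g. coin values) via precision_rounding:
    assert float_round(1.3, precision_rounding=0.5) == 1.5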
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
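# Illustrative sketch (added for clarity, not part of the original module): both
# values are rounded *before* comparison, so 0.006 and 0.002 differ at 2 digits
# even though their difference (0.004) itself rounds to zero.
def _example_float_compare():
    assert float_compare(1.432, 1.431, precision_digits=2) == 0
    assert float_compare(0.006, 0.002, precision_digits=2) == 1
    assert float_is_zero(0.006 - 0.002, precision_digits=2)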
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
the given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
    # Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 | -3,301,733,113,162,020,000 | 48.822581 | 84 | 0.647459 | false |
ric2b/Vivaldi-browser | chromium/third_party/blink/web_tests/external/wpt/tools/third_party/pytest/src/pytest.py | 34 | 1712 | # PYTHON_ARGCOMPLETE_OK
"""
pytest: unit and functional testing with Python.
"""
# else we are imported
from _pytest.config import main, UsageError, cmdline, hookspec, hookimpl
from _pytest.fixtures import fixture, yield_fixture
from _pytest.assertion import register_assert_rewrite
from _pytest.freeze_support import freeze_includes
from _pytest import __version__
from _pytest.debugging import pytestPDB as __pytestPDB
from _pytest.recwarn import warns, deprecated_call
from _pytest.outcomes import fail, skip, importorskip, exit, xfail
from _pytest.mark import MARK_GEN as mark, param
from _pytest.main import Session
from _pytest.nodes import Item, Collector, File
from _pytest.fixtures import fillfixtures as _fillfuncargs
from _pytest.python import Module, Class, Instance, Function, Generator
from _pytest.python_api import approx, raises
set_trace = __pytestPDB.set_trace
__all__ = [
"main",
"UsageError",
"cmdline",
"hookspec",
"hookimpl",
"__version__",
"register_assert_rewrite",
"freeze_includes",
"set_trace",
"warns",
"deprecated_call",
"fixture",
"yield_fixture",
"fail",
"skip",
"xfail",
"importorskip",
"exit",
"mark",
"param",
"approx",
"_fillfuncargs",
"Item",
"File",
"Collector",
"Session",
"Module",
"Class",
"Instance",
"Function",
"Generator",
"raises",
]
if __name__ == "__main__":
# if run as a script or by 'python -m pytest'
# we trigger the below "else" condition by the following import
import pytest
raise SystemExit(pytest.main())
else:
from _pytest.compat import _setup_collect_fakemodule
_setup_collect_fakemodule()
| bsd-3-clause | 3,365,083,057,401,377,300 | 22.777778 | 72 | 0.672313 | false |
BT-ojossen/odoo | addons/share/__init__.py | 448 | 1093 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_model
import res_users
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,049,758,142,943,521,000 | 41.038462 | 78 | 0.620311 | false |
variac/bazel | src/test/py/bazel/test_base.py | 1 | 10391 | # pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import locale
import os
import subprocess
import sys
import tempfile
import unittest
class Error(Exception):
"""Base class for errors in this module."""
pass
class ArgumentError(Error):
"""A function received a bad argument."""
pass
class EnvVarUndefinedError(Error):
"""An expected environment variable is not defined."""
def __init__(self, name):
Error.__init__(self, 'Environment variable "%s" is not defined' % name)
class TestBase(unittest.TestCase):
_runfiles = None
_temp = None
_tests_root = None
_test_cwd = None
def setUp(self):
unittest.TestCase.setUp(self)
if self._runfiles is None:
self._runfiles = TestBase._LoadRunfiles()
test_tmpdir = TestBase._CreateDirs(TestBase.GetEnv('TEST_TMPDIR'))
self._tests_root = TestBase._CreateDirs(
os.path.join(test_tmpdir, 'tests_root'))
self._temp = TestBase._CreateDirs(os.path.join(test_tmpdir, 'tmp'))
self._test_cwd = tempfile.mkdtemp(dir=self._tests_root)
os.chdir(self._test_cwd)
def AssertExitCode(self, actual_exit_code, expected_exit_code, stderr_lines):
"""Assert that `actual_exit_code` == `expected_exit_code`."""
if actual_exit_code != expected_exit_code:
self.fail('\n'.join([
'Bazel exited with %d (expected %d), stderr:' % (actual_exit_code,
expected_exit_code),
'(start stderr)----------------------------------------',
] + (stderr_lines or []) + [
'(end stderr)------------------------------------------',
]))
@staticmethod
def GetEnv(name, default=None):
"""Returns environment variable `name`.
Args:
name: string; name of the environment variable
default: anything; return this value if the envvar is not defined
Returns:
string, the envvar's value if defined, or `default` if the envvar is not
defined but `default` is
Raises:
EnvVarUndefinedError: if `name` is not a defined envvar and `default` is
None
"""
value = os.getenv(name, '__undefined_envvar__')
if value == '__undefined_envvar__':
if default:
return default
raise EnvVarUndefinedError(name)
return value
@staticmethod
def IsWindows():
"""Returns true if the current platform is Windows."""
return os.name == 'nt'
def Path(self, path):
"""Returns the absolute path of `path` relative to the scratch directory.
Args:
path: string; a path, relative to the test's scratch directory,
e.g. "foo/bar/BUILD"
Returns:
an absolute path
Raises:
ArgumentError: if `path` is absolute or contains uplevel references
"""
if os.path.isabs(path) or '..' in path:
raise ArgumentError(('path="%s" may not be absolute and may not contain '
'uplevel references') % path)
return os.path.join(self._tests_root, path)
def Rlocation(self, runfile):
"""Returns the absolute path to a runfile."""
if TestBase.IsWindows():
return self._runfiles.get(runfile)
else:
return os.path.join(self._runfiles, runfile)
def ScratchDir(self, path):
"""Creates directories under the test's scratch directory.
Args:
path: string; a path, relative to the test's scratch directory,
e.g. "foo/bar"
Raises:
ArgumentError: if `path` is absolute or contains uplevel references
IOError: if an I/O error occurs
"""
if not path:
return
abspath = self.Path(path)
if os.path.exists(abspath):
if os.path.isdir(abspath):
return
raise IOError('"%s" (%s) exists and is not a directory' % (path, abspath))
os.makedirs(abspath)
def ScratchFile(self, path, lines=None):
"""Creates a file under the test's scratch directory.
Args:
path: string; a path, relative to the test's scratch directory,
e.g. "foo/bar/BUILD"
lines: [string]; the contents of the file (newlines are added
automatically)
Returns:
The absolute path of the scratch file.
Raises:
ArgumentError: if `path` is absolute or contains uplevel references
IOError: if an I/O error occurs
"""
if not path:
return
abspath = self.Path(path)
if os.path.exists(abspath) and not os.path.isfile(abspath):
raise IOError('"%s" (%s) exists and is not a file' % (path, abspath))
self.ScratchDir(os.path.dirname(path))
with open(abspath, 'w') as f:
if lines:
for l in lines:
f.write(l)
f.write('\n')
return abspath
def RunBazel(self, args, env_remove=None, env_add=None):
"""Runs "bazel <args>", waits for it to exit.
Args:
args: [string]; flags to pass to bazel (e.g. ['--batch', 'build', '//x'])
env_remove: set(string); optional; environment variables to NOT pass to
Bazel
env_add: set(string); optional; environment variables to pass to
Bazel, won't be removed by env_remove.
Returns:
(int, [string], [string]) tuple: exit code, stdout lines, stderr lines
"""
return self.RunProgram([
self.Rlocation('io_bazel/src/bazel'),
'--bazelrc=/dev/null',
'--nomaster_bazelrc',
] + args, env_remove, env_add)
def RunProgram(self, args, env_remove=None, env_add=None):
"""Runs a program (args[0]), waits for it to exit.
Args:
args: [string]; the args to run; args[0] should be the program itself
env_remove: set(string); optional; environment variables to NOT pass to
the program
env_add: set(string); optional; environment variables to pass to
the program, won't be removed by env_remove.
Returns:
(int, [string], [string]) tuple: exit code, stdout lines, stderr lines
"""
with tempfile.TemporaryFile(dir=self._test_cwd) as stdout:
with tempfile.TemporaryFile(dir=self._test_cwd) as stderr:
proc = subprocess.Popen(
args,
stdout=stdout,
stderr=stderr,
cwd=self._test_cwd,
env=self._EnvMap(env_remove, env_add))
exit_code = proc.wait()
stdout.seek(0)
stdout_lines = [
l.decode(locale.getpreferredencoding()).strip()
for l in stdout.readlines()
]
stderr.seek(0)
stderr_lines = [
l.decode(locale.getpreferredencoding()).strip()
for l in stderr.readlines()
]
return exit_code, stdout_lines, stderr_lines
def _EnvMap(self, env_remove=None, env_add=None):
"""Returns the environment variable map to run Bazel or other programs."""
if TestBase.IsWindows():
result = []
if sys.version_info.major == 3:
# Python 3.2 has os.listdir
result = [
n for n in os.listdir('c:\\program files\\java')
if n.startswith('jdk')
]
else:
# Python 2.7 has os.path.walk
def _Visit(result, _, names):
result.extend(n for n in names if n.startswith('jdk'))
while names:
names.pop()
os.path.walk('c:\\program files\\java\\', _Visit, result)
env = {
'SYSTEMROOT': TestBase.GetEnv('SYSTEMROOT'),
# TODO(laszlocsomor): Let Bazel pass BAZEL_SH and JAVA_HOME to tests
# and use those here instead of hardcoding paths.
'JAVA_HOME': 'c:\\program files\\java\\' + sorted(result)[-1],
'BAZEL_SH': 'c:\\tools\\msys64\\usr\\bin\\bash.exe',
# TODO(pcloudy): Remove this after no longer need to debug
# https://github.com/bazelbuild/bazel/issues/3273
'CC_CONFIGURE_DEBUG': '1'
}
# TODO(pcloudy): Remove these hardcoded paths after resolving
# https://github.com/bazelbuild/bazel/issues/3273
env['BAZEL_VC'] = 'visual-studio-not-found'
for p in [
(r'C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional'
r'\VC'),
r'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC',
r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC'
]:
if os.path.exists(p):
env['BAZEL_VC'] = p
break
else:
env = {'HOME': os.path.join(self._temp, 'home')}
env['PATH'] = TestBase.GetEnv('PATH')
# The inner Bazel must know that it's running as part of a test (so that it
# uses --max_idle_secs=15 by default instead of 3 hours, etc.), and it knows
# that by checking for TEST_TMPDIR.
env['TEST_TMPDIR'] = TestBase.GetEnv('TEST_TMPDIR')
env['TMP'] = self._temp
if env_remove:
for e in env_remove:
del env[e]
if env_add:
for e in env_add:
env[e] = env_add[e]
return env
@staticmethod
def _LoadRunfiles():
"""Loads the runfiles manifest from ${TEST_SRCDIR}/MANIFEST.
Only necessary to use on Windows, where runfiles are not symlinked in to the
runfiles directory, but are written to a MANIFEST file instead.
Returns:
on Windows: {string: string} dictionary, keys are runfiles-relative paths,
values are absolute paths that the runfiles entry is mapped to;
on other platforms: string; value of $TEST_SRCDIR
"""
test_srcdir = TestBase.GetEnv('TEST_SRCDIR')
if not TestBase.IsWindows():
return test_srcdir
result = {}
with open(os.path.join(test_srcdir, 'MANIFEST'), 'r') as f:
for l in f:
tokens = l.strip().split(' ')
if len(tokens) == 2:
result[tokens[0]] = tokens[1]
return result
@staticmethod
def _CreateDirs(path):
if not os.path.exists(path):
os.makedirs(path)
elif not os.path.isdir(path):
os.remove(path)
os.makedirs(path)
return path
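# Illustrative sketch (added for clarity, not part of the original file): a
# typical integration test subclasses TestBase, scratches a tiny workspace and
# asserts on the exit code reported by RunBazel. It assumes a Bazel binary is
# reachable through the runfiles, as in the real test suite.
class _ExampleUsageTest(TestBase):
  def testBuildSimplePackage(self):
    self.ScratchFile('WORKSPACE')
    self.ScratchFile('foo/BUILD', ['sh_library(name = "foo")'])
    exit_code, _, stderr = self.RunBazel(['build', '//foo:foo'])
    self.AssertExitCode(exit_code, 0, stderr)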
| apache-2.0 | -5,744,995,324,742,202,000 | 32.627832 | 80 | 0.617457 | false |
tetherless-world/graphene | whyis/task_utils.py | 1 | 1444 | from celery.task.control import inspect
# def setup_task(service):
# service.app = app
# print(service)
# result = None
# if service.query_predicate == self.NS.whyis.globalChangeQuery:
# result = process_resource
# else:
# result = process_nanopub
# result.service = lambda : service
# return result
def is_running_waiting(service_name):
"""
Check if a task is running or waiting.
"""
if is_waiting(service_name):
return True
running_tasks = list(inspect().active().values())[0]
for task in running_tasks:
if 'kwargs' in task:
args = eval(task['kwargs'])
if service_name == args.get('service_name',None):
return True
return False
def is_waiting(service_name):
"""
Check if a task is waiting.
"""
scheduled_tasks = list(inspect().scheduled().values())[0]
for task in scheduled_tasks:
if 'kwargs' in task:
args = eval(task['kwargs'])
if service_name == args.get('service_name',None):
return True
return False
def is_waiting_importer(entity_name, exclude=None):
"""
    Check if an importer task for the given entity is waiting.
"""
if inspect().scheduled():
tasks = list(inspect().scheduled().values())
for task in tasks:
if 'args' in task and entity_name in task['args']:
return True
return False
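# Illustrative sketch (added for clarity, not part of the original module):
# callers typically consult these helpers before dispatching a new background
# run, to avoid queueing duplicate work for the same service.
def _example_guarded_dispatch(service_name, run_service):
    # run_service is assumed to be the caller's Celery task or plain callable.
    if is_running_waiting(service_name):
        return False  # an equivalent run is already active or scheduled
    run_service()
    return True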
| apache-2.0 | 5,429,623,847,146,867,000 | 26.245283 | 68 | 0.58518 | false |
DecisionSystemsGroup/DSGos | airootfs/usr/share/DSGos-Installer/DSGos_Installer/installation/automatic.py | 2 | 14419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# automatic.py
#
# Copyright © 2013-2015 DSGos
#
# This file is part of DSGos_Installer.
#
# DSGos_Installer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DSGos_Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with DSGos_Installer; If not, see <http://www.gnu.org/licenses/>.
""" Automatic installation screen """
from gi.repository import Gtk
import os
import sys
import logging
if __name__ == '__main__':
# Insert the parent directory at the front of the path.
# This is used only when we want to test this screen
base_dir = os.path.dirname(__file__) or '.'
parent_dir = os.path.join(base_dir, '..')
sys.path.insert(0, parent_dir)
import misc.misc as misc
import parted3.fs_module as fs
import parted
from gtkbasebox import GtkBaseBox
from installation import install
from installation import action
from installation import auto_partition
DEST_DIR = "/install"
class InstallationAutomatic(GtkBaseBox):
def __init__(self, params, prev_page="installation_ask", next_page="summary"):
super().__init__(self, params, "automatic", prev_page, next_page)
self.auto_device = None
self.device_store = self.ui.get_object('part_auto_select_drive')
self.device_label = self.ui.get_object('part_auto_select_drive_label')
self.entry = {'luks_password': self.ui.get_object('entry_luks_password'),
'luks_password_confirm': self.ui.get_object('entry_luks_password_confirm')}
self.image_password_ok = self.ui.get_object('image_password_ok')
self.devices = {}
self.installation = None
self.bootloader = "grub2"
self.bootloader_entry = self.ui.get_object('bootloader_entry')
self.bootloader_device_entry = self.ui.get_object('bootloader_device_entry')
self.bootloader_devices = {}
self.bootloader_device = {}
self.mount_devices = {}
self.fs_devices = {}
def translate_ui(self):
txt = _("Select drive:")
self.device_label.set_markup(txt)
label = self.ui.get_object('text_automatic')
txt = _("WARNING! This will overwrite everything currently on your drive!")
txt = "<b>{0}</b>".format(txt)
label.set_markup(txt)
label = self.ui.get_object('info_label')
txt = _("Select the drive we should use to install DSGos and then click above to start the process.")
label.set_markup(txt)
label = self.ui.get_object('label_luks_password')
txt = _("Encryption Password:")
label.set_markup(txt)
label = self.ui.get_object('label_luks_password_confirm')
txt = _("Confirm your password:")
label.set_markup(txt)
label = self.ui.get_object('label_luks_password_warning')
txt = _("LUKS Password. Do not use special characters or accents!")
label.set_markup(txt)
btn = self.ui.get_object('checkbutton_show_password')
btn.set_label(_("Show password"))
self.header.set_subtitle(_("Automatic Installation Mode"))
txt = _("Use the device below for boot loader installation:")
txt = "<span weight='bold' size='small'>{0}</span>".format(txt)
label = self.ui.get_object('bootloader_device_info_label')
label.set_markup(txt)
txt = _("Bootloader:")
label = self.ui.get_object('bootloader_label')
label.set_markup(txt)
txt = _("Device:")
label = self.ui.get_object('bootloader_device_label')
label.set_markup(txt)
def on_checkbutton_show_password_toggled(self, widget):
""" show/hide LUKS passwords """
btn = self.ui.get_object('checkbutton_show_password')
show = btn.get_active()
self.entry['luks_password'].set_visibility(show)
self.entry['luks_password_confirm'].set_visibility(show)
def populate_devices(self):
with misc.raised_privileges():
device_list = parted.getAllDevices()
self.device_store.remove_all()
self.devices = {}
self.bootloader_device_entry.remove_all()
self.bootloader_devices.clear()
for dev in device_list:
# avoid cdrom and any raid, lvm volumes or encryptfs
if not dev.path.startswith("/dev/sr") and \
not dev.path.startswith("/dev/mapper"):
                # drive vendors measure capacity in decimal units: kilo=1000, mega=1000**2, etc.
size_in_gigabytes = int((dev.length * dev.sectorSize) / 1000000000)
line = '{0} [{1} GB] ({2})'.format(dev.model, size_in_gigabytes, dev.path)
self.device_store.append_text(line)
self.devices[line] = dev.path
self.bootloader_device_entry.append_text(line)
self.bootloader_devices[line] = dev.path
logging.debug(line)
self.select_first_combobox_item(self.device_store)
self.select_first_combobox_item(self.bootloader_device_entry)
@staticmethod
def select_first_combobox_item(combobox):
tree_model = combobox.get_model()
tree_iter = tree_model.get_iter_first()
combobox.set_active_iter(tree_iter)
def on_select_drive_changed(self, widget):
line = self.device_store.get_active_text()
if line is not None:
self.auto_device = self.devices[line]
self.forward_button.set_sensitive(True)
def prepare(self, direction):
self.translate_ui()
self.populate_devices()
# image = Gtk.Image.new_from_icon_name("go-next-symbolic", Gtk.IconSize.BUTTON)
# self.forward_button.set_label("")
# self.forward_button.set_image(image)
# self.forward_button.set_always_show_image(True)
# self.forward_button.set_name('fwd_btn')
self.show_all()
self.fill_bootloader_entry()
luks_grid = self.ui.get_object('luks_grid')
luks_grid.set_sensitive(self.settings.get('use_luks'))
# self.forward_button.set_sensitive(False)
def store_values(self):
""" Let's do our installation! """
#response = self.show_warning()
#if response == Gtk.ResponseType.NO:
# return False
luks_password = self.entry['luks_password'].get_text()
self.settings.set('luks_root_password', luks_password)
if luks_password != "":
logging.debug("A root LUKS password has been set")
self.set_bootloader()
return True
def on_luks_password_changed(self, widget):
luks_password = self.entry['luks_password'].get_text()
luks_password_confirm = self.entry['luks_password_confirm'].get_text()
install_ok = True
if len(luks_password) <= 0:
self.image_password_ok.set_opacity(0)
self.forward_button.set_sensitive(True)
else:
if luks_password == luks_password_confirm:
icon = "emblem-default"
else:
icon = "dialog-warning"
install_ok = False
self.image_password_ok.set_from_icon_name(icon, Gtk.IconSize.LARGE_TOOLBAR)
self.image_password_ok.set_opacity(1)
self.forward_button.set_sensitive(install_ok)
def fill_bootloader_entry(self):
""" Put the bootloaders for the user to choose """
self.bootloader_entry.remove_all()
if os.path.exists('/sys/firmware/efi'):
self.bootloader_entry.append_text("Grub2")
self.bootloader_entry.append_text("Gummiboot")
self.bootloader_entry.set_active(0)
self.bootloader_entry.show()
else:
self.bootloader_entry.hide()
widget_ids = ["bootloader_label", "bootloader_device_label"]
for widget_id in widget_ids:
widget = self.ui.get_object(widget_id)
widget.hide()
def on_bootloader_device_check_toggled(self, checkbox):
status = checkbox.get_active()
widget_ids = [
"bootloader_device_entry",
"bootloader_entry",
"bootloader_label",
"bootloader_device_label"]
for widget_id in widget_ids:
widget = self.ui.get_object(widget_id)
widget.set_sensitive(status)
self.settings.set('bootloader_install', status)
def on_bootloader_device_entry_changed(self, widget):
""" Get new selected bootloader device """
line = self.bootloader_device_entry.get_active_text()
if line is not None:
self.bootloader_device = self.bootloader_devices[line]
def on_bootloader_entry_changed(self, widget):
""" Get new selected bootloader """
line = self.bootloader_entry.get_active_text()
if line is not None:
self.bootloader = line.lower()
def show_warning(self):
txt = _("Do you really want to proceed and delete all your content on your hard drive?")
txt = txt + "\n\n" + self.device_store.get_active_text()
message = Gtk.MessageDialog(
transient_for=self.get_toplevel(),
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.YES_NO,
text=txt)
response = message.run()
message.destroy()
return response
def get_changes(self):
""" Grab all changes for confirmation """
change_list = [action.Action("delete", self.auto_device)]
auto = auto_partition.AutoPartition(dest_dir=DEST_DIR,
auto_device=self.auto_device,
use_luks=self.settings.get("use_luks"),
luks_password=self.settings.get("luks_root_password"),
use_lvm=self.settings.get("use_lvm"),
use_home=self.settings.get("use_home"),
bootloader=self.settings.get("bootloader"),
callback_queue=self.callback_queue)
devices = auto.get_devices()
mount_devices = auto.get_mount_devices()
fs_devices = auto.get_fs_devices()
mount_points = {}
for mount_point in mount_devices:
device = mount_devices[mount_point]
mount_points[device] = mount_point
for device in sorted(fs_devices.keys()):
try:
txt = _("Device {0} will be created ({1} filesystem) as {2}").format(device, fs_devices[device], mount_points[device])
except KeyError:
txt = _("Device {0} will be created ({1} filesystem)").format(device, fs_devices[device])
act = action.Action("info", txt)
change_list.append(act)
return change_list
def run_format(self):
logging.debug("Creating partitions and their filesystems in %s", self.auto_device)
# If no key password is given a key file is generated and stored in /boot
# (see auto_partition.py)
auto = auto_partition.AutoPartition(dest_dir=DEST_DIR,
auto_device=self.auto_device,
use_luks=self.settings.get("use_luks"),
luks_password=self.settings.get("luks_root_password"),
use_lvm=self.settings.get("use_lvm"),
use_home=self.settings.get("use_home"),
bootloader=self.settings.get("bootloader"),
callback_queue=self.callback_queue)
auto.run()
# Get mount_devices and fs_devices
# (mount_devices will be used when configuring GRUB in modify_grub_default)
# (fs_devices will be used when configuring the fstab file)
self.mount_devices = auto.get_mount_devices()
self.fs_devices = auto.get_fs_devices()
def set_bootloader(self):
checkbox = self.ui.get_object("bootloader_device_check")
if not checkbox.get_active():
self.settings.set('bootloader_install', False)
logging.info("DSGos_Installer will not install any bootloader")
else:
self.settings.set('bootloader_install', True)
self.settings.set('bootloader_device', self.bootloader_device)
self.settings.set('bootloader', self.bootloader)
msg = _("DSGos will install the bootloader '{0}' in device '{1}'")
msg = msg.format(self.bootloader, self.bootloader_device)
logging.info(msg)
def run_install(self, packages, metalinks):
txt = _("DSGos_Installer will install DSGos on device %s")
logging.info(txt, self.auto_device)
self.settings.set('auto_device', self.auto_device)
ssd = {self.auto_device: fs.is_ssd(self.auto_device)}
if not self.testing:
self.installation = install.Installation(
self.settings,
self.callback_queue,
packages,
metalinks,
self.mount_devices,
self.fs_devices,
ssd)
self.installation.start()
else:
logging.debug("Testing mode, not changing anything")
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
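
# Illustrative sketch (not part of the installer): the drive-entry formatting
# used by populate_devices() above. The model, length, sector size and path
# values are made up for the example.
def _example_drive_line():
    model, length, sector_size, path = "ExampleDisk", 976773168, 512, "/dev/sda"
    size_in_gigabytes = int((length * sector_size) / 1000000000)
    return '{0} [{1} GB] ({2})'.format(model, size_in_gigabytes, path)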
if __name__ == '__main__':
from test_screen import _, run
run('InstallationAutomatic')
| mit | -2,635,709,658,637,740,000 | 37.042216 | 134 | 0.597378 | false |
hrashk/sympy | sympy/mpmath/tests/test_special.py | 37 | 2854 | from sympy.mpmath import *
def test_special():
assert inf == inf
assert inf != -inf
assert -inf == -inf
assert inf != nan
assert nan != nan
assert isnan(nan)
assert --inf == inf
assert abs(inf) == inf
assert abs(-inf) == inf
assert abs(nan) != abs(nan)
assert isnan(inf - inf)
assert isnan(inf + (-inf))
assert isnan(-inf - (-inf))
assert isnan(inf + nan)
assert isnan(-inf + nan)
assert mpf(2) + inf == inf
assert 2 + inf == inf
assert mpf(2) - inf == -inf
assert 2 - inf == -inf
assert inf > 3
assert 3 < inf
assert 3 > -inf
assert -inf < 3
assert inf > mpf(3)
assert mpf(3) < inf
assert mpf(3) > -inf
assert -inf < mpf(3)
assert not (nan < 3)
assert not (nan > 3)
assert isnan(inf * 0)
assert isnan(-inf * 0)
assert inf * 3 == inf
assert inf * -3 == -inf
assert -inf * 3 == -inf
assert -inf * -3 == inf
assert inf * inf == inf
assert -inf * -inf == inf
assert isnan(nan / 3)
assert inf / -3 == -inf
assert inf / 3 == inf
assert 3 / inf == 0
assert -3 / inf == 0
assert 0 / inf == 0
assert isnan(inf / inf)
assert isnan(inf / -inf)
assert isnan(inf / nan)
assert mpf('inf') == mpf('+inf') == inf
assert mpf('-inf') == -inf
assert isnan(mpf('nan'))
assert isinf(inf)
assert isinf(-inf)
assert not isinf(mpf(0))
assert not isinf(nan)
def test_special_powers():
assert inf**3 == inf
assert isnan(inf**0)
assert inf**-3 == 0
assert (-inf)**2 == inf
assert (-inf)**3 == -inf
assert isnan((-inf)**0)
assert (-inf)**-2 == 0
assert (-inf)**-3 == 0
assert isnan(nan**5)
assert isnan(nan**0)
def test_functions_special():
assert exp(inf) == inf
assert exp(-inf) == 0
assert isnan(exp(nan))
assert log(inf) == inf
assert isnan(log(nan))
assert isnan(sin(inf))
assert isnan(sin(nan))
assert atan(inf).ae(pi/2)
assert atan(-inf).ae(-pi/2)
assert isnan(sqrt(nan))
assert sqrt(inf) == inf
def test_convert_special():
float_inf = 1e300 * 1e300
float_ninf = -float_inf
float_nan = float_inf/float_ninf
assert mpf(3) * float_inf == inf
assert mpf(3) * float_ninf == -inf
assert isnan(mpf(3) * float_nan)
assert not (mpf(3) < float_nan)
assert not (mpf(3) > float_nan)
assert not (mpf(3) <= float_nan)
assert not (mpf(3) >= float_nan)
assert float(mpf('1e1000')) == float_inf
assert float(mpf('-1e1000')) == float_ninf
assert float(mpf('1e100000000000000000')) == float_inf
assert float(mpf('-1e100000000000000000')) == float_ninf
assert float(mpf('1e-100000000000000000')) == 0.0
def test_div_bug():
assert isnan(nan/1)
assert isnan(nan/2)
assert inf/2 == inf
assert (-inf)/2 == -inf
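
# Extra illustrative checks (not part of the original suite); they restate
# behaviour consistent with the assertions above.
def test_special_examples():
    assert inf + inf == inf
    assert isnan(nan + 1)
    assert mpf(0) * 3 == 0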
| bsd-3-clause | 7,183,662,294,269,935,000 | 24.256637 | 60 | 0.566573 | false |
nicobustillos/odoo | addons/hr_attendance/hr_attendance.py | 56 | 9198 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_action_reason(osv.osv):
_name = "hr.action.reason"
_description = "Action Reason"
_columns = {
'name': fields.char('Reason', required=True, help='Specifies the reason for Signing In/Signing Out.'),
'action_type': fields.selection([('sign_in', 'Sign in'), ('sign_out', 'Sign out')], "Action Type"),
}
_defaults = {
'action_type': 'sign_in',
}
def _employee_get(obj, cr, uid, context=None):
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
return ids and ids[0] or False
class hr_attendance(osv.osv):
_name = "hr.attendance"
_description = "Attendance"
def _worked_hours_compute(self, cr, uid, ids, fieldnames, args, context=None):
"""For each hr.attendance record of action sign-in: assign 0.
For each hr.attendance record of action sign-out: assign number of hours since last sign-in.
"""
res = {}
for obj in self.browse(cr, uid, ids, context=context):
if obj.action == 'sign_in':
res[obj.id] = 0
elif obj.action == 'sign_out':
# Get the associated sign-in
last_signin_id = self.search(cr, uid, [
('employee_id', '=', obj.employee_id.id),
('name', '<', obj.name), ('action', '=', 'sign_in')
], limit=1, order='name DESC')
if last_signin_id:
last_signin = self.browse(cr, uid, last_signin_id, context=context)[0]
# Compute time elapsed between sign-in and sign-out
last_signin_datetime = datetime.strptime(last_signin.name, '%Y-%m-%d %H:%M:%S')
signout_datetime = datetime.strptime(obj.name, '%Y-%m-%d %H:%M:%S')
workedhours_datetime = (signout_datetime - last_signin_datetime)
res[obj.id] = ((workedhours_datetime.seconds) / 60) / 60
else:
res[obj.id] = False
return res
_columns = {
'name': fields.datetime('Date', required=True, select=1),
'action': fields.selection([('sign_in', 'Sign In'), ('sign_out', 'Sign Out'), ('action','Action')], 'Action', required=True),
'action_desc': fields.many2one("hr.action.reason", "Action Reason", domain="[('action_type', '=', action)]", help='Specifies the reason for Signing In/Signing Out in case of extra hours.'),
'employee_id': fields.many2one('hr.employee', "Employee", required=True, select=True),
'worked_hours': fields.function(_worked_hours_compute, type='float', string='Worked Hours', store=True),
}
_defaults = {
'name': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), #please don't remove the lambda, if you remove it then the current time will not change
'employee_id': _employee_get,
}
def _altern_si_so(self, cr, uid, ids, context=None):
""" Alternance sign_in/sign_out check.
Previous (if exists) must be of opposite action.
Next (if exists) must be of opposite action.
"""
for att in self.browse(cr, uid, ids, context=context):
# search and browse for first previous and first next records
prev_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '<', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name DESC')
next_add_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '>', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name ASC')
prev_atts = self.browse(cr, uid, prev_att_ids, context=context)
next_atts = self.browse(cr, uid, next_add_ids, context=context)
# check for alternance, return False if at least one condition is not satisfied
if prev_atts and prev_atts[0].action == att.action: # previous exists and is same action
return False
if next_atts and next_atts[0].action == att.action: # next exists and is same action
return False
if (not prev_atts) and (not next_atts) and att.action != 'sign_in': # first attendance must be sign_in
return False
return True
_constraints = [(_altern_si_so, 'Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)', ['action'])]
_order = 'name desc'
class hr_employee(osv.osv):
_inherit = "hr.employee"
_description = "Employee"
def _state(self, cr, uid, ids, name, args, context=None):
result = {}
if not ids:
return result
for id in ids:
result[id] = 'absent'
cr.execute('SELECT hr_attendance.action, hr_attendance.employee_id \
FROM ( \
SELECT MAX(name) AS name, employee_id \
FROM hr_attendance \
WHERE action in (\'sign_in\', \'sign_out\') \
GROUP BY employee_id \
) AS foo \
LEFT JOIN hr_attendance \
ON (hr_attendance.employee_id = foo.employee_id \
AND hr_attendance.name = foo.name) \
WHERE hr_attendance.employee_id IN %s',(tuple(ids),))
for res in cr.fetchall():
result[res[1]] = res[0] == 'sign_in' and 'present' or 'absent'
return result
def _last_sign(self, cr, uid, ids, name, args, context=None):
result = {}
if not ids:
return result
for id in ids:
result[id] = False
cr.execute("""select max(name) as name
from hr_attendance
where action in ('sign_in', 'sign_out') and employee_id = %s""",(id,))
for res in cr.fetchall():
result[id] = res[0]
return result
def _attendance_access(self, cr, uid, ids, name, args, context=None):
        # this function field is used to hide the attendance sign in/sign out button from the menu
group = self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'group_hr_attendance')
visible = False
if uid in [user.id for user in group.users]:
visible = True
return dict([(x, visible) for x in ids])
_columns = {
'state': fields.function(_state, type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Attendance'),
'last_sign': fields.function(_last_sign, type='datetime', string='Last Sign'),
'attendance_access': fields.function(_attendance_access, string='Attendance Access', type='boolean'),
}
def _action_check(self, cr, uid, emp_id, dt=False, context=None):
cr.execute('SELECT MAX(name) FROM hr_attendance WHERE employee_id=%s', (emp_id,))
res = cr.fetchone()
return not (res and (res[0]>=(dt or time.strftime('%Y-%m-%d %H:%M:%S'))))
def attendance_action_change(self, cr, uid, ids, context=None):
if context is None:
context = {}
action_date = context.get('action_date', False)
action = context.get('action', False)
hr_attendance = self.pool.get('hr.attendance')
warning_sign = {'sign_in': _('Sign In'), 'sign_out': _('Sign Out')}
for employee in self.browse(cr, uid, ids, context=context):
if not action:
if employee.state == 'present': action = 'sign_out'
if employee.state == 'absent': action = 'sign_in'
if not self._action_check(cr, uid, employee.id, action_date, context):
raise osv.except_osv(_('Warning'), _('You tried to %s with a date anterior to another event !\nTry to contact the HR Manager to correct attendances.')%(warning_sign[action],))
vals = {'action': action, 'employee_id': employee.id}
if action_date:
vals['name'] = action_date
hr_attendance.create(cr, uid, vals, context=context)
return True
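
# Illustrative sketch (not part of the original module): the worked-hours
# arithmetic used in _worked_hours_compute above, applied to plain datetimes.
# The sign-in/sign-out times are made up for the example.
def _example_worked_hours():
    sign_in = datetime(2014, 1, 6, 8, 30, 0)
    sign_out = datetime(2014, 1, 6, 17, 0, 0)
    delta = sign_out - sign_in
    # same arithmetic as above: whole hours elapsed between sign in and sign out
    return (delta.seconds / 60) / 60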
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,568,110,238,119,446,000 | 47.157068 | 197 | 0.572951 | false |
theonlydude/RandomMetroidSolver | solver/commonSolver.py | 1 | 37327 | import logging, time
from logic.smboolmanager import SMBoolManagerPlando as SMBoolManager
from logic.smbool import SMBool, smboolFalse
from logic.helpers import Bosses
from rom.romloader import RomLoader
from rom.rom_patches import RomPatches
from graph.graph import AccessGraphSolver as AccessGraph
from utils.utils import PresetLoader
from solver.conf import Conf
from graph.graph_utils import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, GraphUtils, getAccessPoint
from utils.parameters import easy, medium, hard, harder, hardcore, mania, infinity
from utils.doorsmanager import DoorsManager
from logic.logic import Logic
class CommonSolver(object):
def loadRom(self, rom, interactive=False, magic=None, startLocation=None):
self.scavengerOrder = []
        # startLocation param is only used for seedless
if rom == None:
# TODO::add a --logic parameter for seedless
Logic.factory('vanilla')
self.romFileName = 'seedless'
self.majorsSplit = 'Full'
self.masterMajorsSplit = 'Full'
self.areaRando = True
self.bossRando = True
self.escapeRando = False
self.escapeTimer = "03:00"
self.startLocation = startLocation
RomPatches.setDefaultPatches(startLocation)
self.startArea = getAccessPoint(startLocation).Start['solveArea']
# in seedless load all the vanilla transitions
self.areaTransitions = vanillaTransitions[:]
self.bossTransitions = vanillaBossesTransitions[:]
self.escapeTransition = [vanillaEscapeTransitions[0]]
# in seedless we allow mixing of area and boss transitions
self.hasMixedTransitions = True
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.locations = Logic.locations
for loc in self.locations:
loc.itemName = 'Nothing'
# set doors related to default patches
DoorsManager.setDoorsColor()
self.doorsRando = False
self.hasNothing = False
else:
self.romFileName = rom
self.romLoader = RomLoader.factory(rom, magic)
Logic.factory(self.romLoader.readLogic())
self.romLoader.readNothingId()
self.locations = Logic.locations
(self.majorsSplit, self.masterMajorsSplit) = self.romLoader.assignItems(self.locations)
(self.startLocation, self.startArea, startPatches) = self.romLoader.getStartAP()
if not GraphUtils.isStandardStart(self.startLocation) and self.majorsSplit != 'Full':
# update major/chozo locs in non standard start
self.romLoader.updateSplitLocs(self.majorsSplit, self.locations)
(self.areaRando, self.bossRando, self.escapeRando) = self.romLoader.loadPatches()
RomPatches.ActivePatches += startPatches
self.escapeTimer = self.romLoader.getEscapeTimer()
self.doorsRando = self.romLoader.loadDoorsColor()
self.hasNothing = self.checkLocsForNothing()
if self.majorsSplit == 'Scavenger':
self.scavengerOrder = self.romLoader.loadScavengerOrder(self.locations)
if interactive == False:
print("ROM {} majors: {} area: {} boss: {} escape: {} patches: {} activePatches: {}".format(rom, self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(self.romLoader.getPatches()), sorted(RomPatches.ActivePatches)))
else:
print("majors: {} area: {} boss: {} escape: {} activepatches: {}".format(self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(RomPatches.ActivePatches)))
(self.areaTransitions, self.bossTransitions, self.escapeTransition, self.hasMixedTransitions) = self.romLoader.getTransitions()
if interactive == True and self.debug == False:
# in interactive area mode we build the graph as we play along
if self.areaRando == True and self.bossRando == True:
self.curGraphTransitions = []
elif self.areaRando == True:
self.curGraphTransitions = self.bossTransitions[:]
elif self.bossRando == True:
self.curGraphTransitions = self.areaTransitions[:]
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions
if self.escapeRando == False:
self.curGraphTransitions += self.escapeTransition
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.smbm = SMBoolManager()
self.areaGraph = AccessGraph(Logic.accessPoints, self.curGraphTransitions)
# store at each step how many locations are available
self.nbAvailLocs = []
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("Display items at locations:")
for loc in self.locations:
self.log.debug('{:>50}: {:>16}'.format(loc.Name, loc.itemName))
def loadPreset(self, presetFileName):
presetLoader = PresetLoader.factory(presetFileName)
presetLoader.load()
self.smbm.createKnowsFunctions()
if self.log.getEffectiveLevel() == logging.DEBUG:
presetLoader.printToScreen()
def getLoc(self, locName):
for loc in self.locations:
if loc.Name == locName:
return loc
def getNextDifficulty(self, difficulty):
nextDiffs = {
0: easy,
easy: medium,
medium: hard,
hard: harder,
harder: hardcore,
hardcore: mania,
mania: infinity
}
return nextDiffs[difficulty]
def checkLocsForNothing(self):
# for the auto tracker, need to know if we have to track nothing items
return any(loc.itemName == "Nothing" for loc in self.locations)
def computeLocationsDifficulty(self, locations, phase="major"):
difficultyTarget = Conf.difficultyTarget
nextLocations = locations
# before looping on all diff targets, get only the available locations with diff target infinity
if difficultyTarget != infinity:
self.areaGraph.getAvailableLocations(nextLocations, self.smbm, infinity, self.lastAP)
nextLocations = [loc for loc in nextLocations if loc.difficulty]
while True:
self.areaGraph.getAvailableLocations(nextLocations, self.smbm, difficultyTarget, self.lastAP)
# check post available functions too
for loc in nextLocations:
loc.evalPostAvailable(self.smbm)
self.areaGraph.useCache(True)
# also check if we can come back to current AP from the location
for loc in nextLocations:
loc.evalComeBack(self.smbm, self.areaGraph, self.lastAP)
self.areaGraph.useCache(False)
nextLocations = [loc for loc in nextLocations if not loc.difficulty]
if not nextLocations:
break
if difficultyTarget == infinity:
# we've tested all the difficulties
break
# start a new loop with next difficulty
difficultyTarget = self.getNextDifficulty(difficultyTarget)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("available {} locs:".format(phase))
for loc in locations:
if loc.difficulty.bool == True:
print("{:>48}: {:>8}".format(loc.Name, round(loc.difficulty.difficulty, 2)))
print(" smbool: {}".format(loc.difficulty))
print(" path: {}".format([ap.Name for ap in loc.path]))
def collectMajor(self, loc, itemName=None):
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
self.collectItem(loc, itemName)
return loc
def collectMinor(self, loc):
self.minorLocations.remove(loc)
self.visitedLocations.append(loc)
self.collectItem(loc)
return loc
def collectItem(self, loc, item=None):
if item == None:
item = loc.itemName
if self.vcr != None:
self.vcr.addLocation(loc.Name, item)
if self.firstLogFile is not None:
if item not in self.collectedItems:
self.firstLogFile.write("{};{};{};{}\n".format(item, loc.Name, loc.Area, loc.GraphArea))
if item not in Conf.itemsForbidden:
self.collectedItems.append(item)
if self.checkDuplicateMajor == True:
if item not in ['Nothing', 'NoEnergy', 'Missile', 'Super', 'PowerBomb', 'ETank', 'Reserve']:
if self.smbm.haveItem(item):
print("WARNING: {} has already been picked up".format(item))
self.smbm.addItem(item)
else:
# update the name of the item
item = "-{}-".format(item)
loc.itemName = item
self.collectedItems.append(item)
# we still need the boss difficulty
if not loc.isBoss():
loc.difficulty = smboolFalse
if self.log.getEffectiveLevel() == logging.DEBUG:
print("---------------------------------------------------------------")
print("collectItem: {:<16} at {:<48}".format(item, loc.Name))
print("---------------------------------------------------------------")
# last loc is used as root node for the graph.
# when loading a plando we can load locations from non connected areas, so they don't have an access point.
if loc.accessPoint is not None:
self.lastAP = loc.accessPoint
self.lastArea = loc.SolveArea
def getLocIndex(self, locName):
for (i, loc) in enumerate(self.visitedLocations):
if loc.Name == locName:
return i
def removeItemAt(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
locIndex = self.getLocIndex(locName)
if locIndex is None:
self.errorMsg = "Location '{}' has not been visited".format(locName)
return
loc = self.visitedLocations.pop(locIndex)
# removeItemAt is only used from the tracker, so all the locs are in majorLocations
self.majorLocations.append(loc)
# access point
if len(self.visitedLocations) == 0:
self.lastAP = self.startLocation
self.lastArea = self.startArea
else:
self.lastAP = self.visitedLocations[-1].accessPoint
self.lastArea = self.visitedLocations[-1].SolveArea
# delete location params which are set when the location is available
if loc.difficulty is not None:
loc.difficulty = None
if loc.distance is not None:
loc.distance = None
if loc.accessPoint is not None:
loc.accessPoint = None
if loc.path is not None:
loc.path = None
# item
item = loc.itemName
if self.mode in ['seedless', 'race', 'debug']:
# in seedless remove the first nothing found as collectedItems is not ordered
self.collectedItems.remove(item)
else:
self.collectedItems.pop(locIndex)
        # if multiple majors in plando mode, remove it from smbm only when it's the last occurrence of it
if self.smbm.isCountItem(item):
self.smbm.removeItem(item)
else:
if item not in self.collectedItems:
self.smbm.removeItem(item)
def cancelLastItems(self, count):
if self.vcr != None:
self.vcr.addRollback(count)
if self.interactive == False:
self.nbAvailLocs = self.nbAvailLocs[:-count]
for _ in range(count):
if len(self.visitedLocations) == 0:
return
loc = self.visitedLocations.pop()
if self.majorsSplit == 'Full':
self.majorLocations.append(loc)
else:
if loc.isClass(self.majorsSplit) or loc.isBoss():
self.majorLocations.append(loc)
else:
self.minorLocations.append(loc)
# access point
if len(self.visitedLocations) == 0:
self.lastAP = self.startLocation
self.lastArea = self.startArea
else:
self.lastAP = self.visitedLocations[-1].accessPoint
if self.lastAP is None:
# default to location first access from access point
self.lastAP = list(self.visitedLocations[-1].AccessFrom.keys())[0]
self.lastArea = self.visitedLocations[-1].SolveArea
# delete location params which are set when the location is available
if loc.difficulty is not None:
loc.difficulty = None
if loc.distance is not None:
loc.distance = None
if loc.accessPoint is not None:
loc.accessPoint = None
if loc.path is not None:
loc.path = None
# item
item = loc.itemName
if item != self.collectedItems[-1]:
raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc.Name, item, self.collectedItems[-1]))
# in plando we have to remove the last added item,
# else it could be used in computing the postAvailable of a location
if self.mode in ['plando', 'seedless', 'race', 'debug']:
loc.itemName = 'Nothing'
self.collectedItems.pop()
            # if multiple majors in plando mode, remove it from smbm only when it's the last occurrence of it
if self.smbm.isCountItem(item):
self.smbm.removeItem(item)
else:
if item not in self.collectedItems:
self.smbm.removeItem(item)
def printLocs(self, locs, phase):
if len(locs) > 0:
print("{}:".format(phase))
print('{:>48} {:>12} {:>8} {:>8} {:>34} {:>10}'.format("Location Name", "Difficulty", "Distance", "ComeBack", "SolveArea", "AreaWeight"))
for loc in locs:
print('{:>48} {:>12} {:>8} {:>8} {:>34} {:>10}'.
format(loc.Name, round(loc.difficulty[1], 2), round(loc.distance, 2),
loc.comeBack, loc.SolveArea, loc.areaWeight if loc.areaWeight is not None else -1))
def getAvailableItemsList(self, locations, threshold):
# locations without distance are not available
locations = [loc for loc in locations if loc.distance is not None]
if len(locations) == 0:
return []
        # add no-comeback locations which have been selected by the comeback step (areaWeight == 1)
around = [loc for loc in locations if( (loc.areaWeight is not None and loc.areaWeight == 1)
or ((loc.SolveArea == self.lastArea or loc.distance < 3)
and loc.difficulty.difficulty <= threshold
and not Bosses.areaBossDead(self.smbm, self.lastArea)
and loc.comeBack is not None and loc.comeBack == True) )]
outside = [loc for loc in locations if not loc in around]
if self.log.getEffectiveLevel() == logging.DEBUG:
self.printLocs(around, "around1")
self.printLocs(outside, "outside1")
around.sort(key=lambda loc: (
# locs in the same area
0 if loc.SolveArea == self.lastArea
else 1,
# nearest locs
loc.distance,
# beating a boss
0 if loc.isBoss()
else 1,
# easiest first
loc.difficulty.difficulty
)
)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.printLocs(around, "around2")
# we want to sort the outside locations by putting the ones in the same area first,
        # then we sort the remaining areas starting with boss dead status.
# we also want to sort by range of difficulty and not only with the difficulty threshold.
ranged = {
"areaWeight": [],
"easy": [],
"medium": [],
"hard": [],
"harder": [],
"hardcore": [],
"mania": [],
"noComeBack": []
}
for loc in outside:
if loc.areaWeight is not None:
ranged["areaWeight"].append(loc)
elif loc.comeBack is None or loc.comeBack == False:
ranged["noComeBack"].append(loc)
else:
difficulty = loc.difficulty.difficulty
if difficulty < medium:
ranged["easy"].append(loc)
elif difficulty < hard:
ranged["medium"].append(loc)
elif difficulty < harder:
ranged["hard"].append(loc)
elif difficulty < hardcore:
ranged["harder"].append(loc)
elif difficulty < mania:
ranged["hardcore"].append(loc)
else:
ranged["mania"].append(loc)
for key in ranged:
ranged[key].sort(key=lambda loc: (
# first locs in the same area
0 if loc.SolveArea == self.lastArea else 1,
# first nearest locs
loc.distance,
# beating a boss
loc.difficulty.difficulty if (not Bosses.areaBossDead(self.smbm, loc.Area)
and loc.isBoss())
else 100000,
# areas with boss still alive
loc.difficulty.difficulty if (not Bosses.areaBossDead(self.smbm, loc.Area))
else 100000,
loc.difficulty.difficulty))
if self.log.getEffectiveLevel() == logging.DEBUG:
for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
self.printLocs(ranged[key], "outside2:{}".format(key))
outside = []
for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
outside += ranged[key]
return around + outside
def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold):
# first take major items of acceptable difficulty in the current area
if (len(majorsAvailable) > 0
and majorsAvailable[0].SolveArea == self.lastArea
and majorsAvailable[0].difficulty.difficulty <= diffThreshold
and majorsAvailable[0].comeBack == True):
return self.collectMajor(majorsAvailable.pop(0))
# next item decision
elif len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
self.log.debug('MAJOR')
return self.collectMajor(majorsAvailable.pop(0))
elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
# we don't check for hasEnoughMinors here, because we would be stuck, so pickup
# what we can and hope it gets better
self.log.debug('MINOR')
return self.collectMinor(minorsAvailable.pop(0))
elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
self.log.debug('BOTH|M={}, m={}'.format(majorsAvailable[0].Name, minorsAvailable[0].Name))
# if both are available, decide based on area, difficulty and comeBack
nextMajDifficulty = majorsAvailable[0].difficulty.difficulty
nextMinDifficulty = minorsAvailable[0].difficulty.difficulty
nextMajArea = majorsAvailable[0].SolveArea
nextMinArea = minorsAvailable[0].SolveArea
nextMajComeBack = majorsAvailable[0].comeBack
nextMinComeBack = minorsAvailable[0].comeBack
nextMajDistance = majorsAvailable[0].distance
nextMinDistance = minorsAvailable[0].distance
maxAreaWeigth = 10000
nextMajAreaWeight = majorsAvailable[0].areaWeight if majorsAvailable[0].areaWeight is not None else maxAreaWeigth
            nextMinAreaWeight = minorsAvailable[0].areaWeight if minorsAvailable[0].areaWeight is not None else maxAreaWeigth
if self.log.getEffectiveLevel() == logging.DEBUG:
print(" : {:>4} {:>32} {:>4} {:>4} {:>6}".format("diff", "area", "back", "dist", "weight"))
print("major: {:>4} {:>32} {:>4} {:>4} {:>6}".format(round(nextMajDifficulty, 2), nextMajArea, nextMajComeBack, round(nextMajDistance, 2), nextMajAreaWeight))
print("minor: {:>4} {:>32} {:>4} {:>4} {:>6}".format(round(nextMinDifficulty, 2), nextMinArea, nextMinComeBack, round(nextMinDistance, 2), nextMinAreaWeight))
if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge') and nextMajAreaWeight != maxAreaWeigth:
# we have charge, no longer need minors
self.log.debug("we have charge, no longer need minors, take major")
return self.collectMajor(majorsAvailable.pop(0))
else:
# respect areaweight first
if nextMajAreaWeight != nextMinAreaWeight:
self.log.debug("maj/min != area weight")
if nextMajAreaWeight < nextMinAreaWeight:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# then take item from loc where you can come back
elif nextMajComeBack != nextMinComeBack:
self.log.debug("maj/min != combeback")
if nextMajComeBack == True:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# difficulty over area (this is a difficulty estimator, not a speedrunning simulator)
elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
# take the closer one
if nextMajDistance != nextMinDistance:
self.log.debug("!= distance and <= diffThreshold")
if nextMajDistance < nextMinDistance:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# take the easier
elif nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMajDifficulty < nextMinDifficulty:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
# same difficulty and distance for minor and major, take major first
else:
return self.collectMajor(majorsAvailable.pop(0))
                # if not all the minor types are collected, start with minors
elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
self.log.debug("not all minors types")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMinArea == self.lastArea and nextMinDifficulty <= diffThreshold:
self.log.debug("not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
# take the easier
if nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMajDifficulty < nextMinDifficulty:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
# take the closer one
elif nextMajDistance != nextMinDistance:
self.log.debug("!= distance and > diffThreshold")
if nextMajDistance < nextMinDistance:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# same difficulty and distance for minor and major, take major first
else:
return self.collectMajor(majorsAvailable.pop(0))
else:
if nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
else:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
raise Exception("Can't take a decision")
def checkMB(self, mbLoc, justCheck=False):
# add mother brain loc and check if it's accessible
self.majorLocations.append(mbLoc)
self.computeLocationsDifficulty(self.majorLocations)
if justCheck:
self.majorLocations.remove(mbLoc)
return mbLoc.difficulty == True
if mbLoc.difficulty == True:
self.log.debug("MB loc accessible")
self.collectMajor(mbLoc)
self.motherBrainKilled = True
else:
self.log.debug("MB loc not accessible")
self.majorLocations.remove(mbLoc)
self.motherBrainKilled = False
return self.motherBrainKilled
def computeDifficulty(self):
# loop on the available locations depending on the collected items.
# before getting a new item, loop on all of them and get their difficulty,
# the next collected item is the one with the smallest difficulty,
# if equality between major and minor, take major first.
        # remove mother brain location (there are item pickup conditions on top of reaching the mother brain location)
mbLoc = self.getLoc('Mother Brain')
self.locations.remove(mbLoc)
if self.majorsSplit == 'Major':
self.majorLocations = [loc for loc in self.locations if loc.isMajor() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if loc.isMinor()]
elif self.majorsSplit == 'Chozo':
self.majorLocations = [loc for loc in self.locations if loc.isChozo() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if not loc.isChozo() and not loc.isBoss()]
elif self.majorsSplit == 'Scavenger':
self.majorLocations = [loc for loc in self.locations if loc.isScavenger() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if not loc.isScavenger() and not loc.isBoss()]
else:
# Full
self.majorLocations = self.locations[:] # copy
self.minorLocations = self.majorLocations
self.visitedLocations = []
self.collectedItems = []
self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
isEndPossible = False
endDifficulty = mania
diffThreshold = self.getDiffThreshold()
self.motherBrainKilled = False
self.motherBrainCouldBeKilled = False
while True:
# actual while condition
hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
hasEnoughMajors = self.pickup.enoughMajors(self.smbm, self.majorLocations)
hasEnoughItems = hasEnoughMajors and hasEnoughMinors
canEndGame = self.canEndGame()
(isEndPossible, endDifficulty) = (canEndGame.bool, canEndGame.difficulty)
if isEndPossible and hasEnoughItems and self.scavengerHuntComplete():
if endDifficulty <= diffThreshold:
if self.checkMB(mbLoc):
self.log.debug("checkMB: all end game checks are ok, END")
break
else:
self.log.debug("checkMB: canEnd but MB loc not accessible")
else:
if not self.motherBrainCouldBeKilled:
self.motherBrainCouldBeKilled = self.checkMB(mbLoc, justCheck=True)
self.log.debug("checkMB: end checks ok except MB difficulty, MB could be killed: {}".format(self.motherBrainCouldBeKilled))
# check time limit
if self.runtimeLimit_s > 0:
if time.process_time() - self.startTime > self.runtimeLimit_s:
self.log.debug("time limit exceeded ({})".format(self.runtimeLimit_s))
return (-1, False)
self.log.debug("Current AP/Area: {}/{}".format(self.lastAP, self.lastArea))
# compute the difficulty of all the locations
self.computeLocationsDifficulty(self.majorLocations)
if self.majorsSplit != 'Full':
self.computeLocationsDifficulty(self.minorLocations, phase="minor")
# keep only the available locations
majorsAvailable = [loc for loc in self.majorLocations if loc.difficulty is not None and loc.difficulty.bool == True]
minorsAvailable = [loc for loc in self.minorLocations if loc.difficulty is not None and loc.difficulty.bool == True]
self.nbAvailLocs.append(len(self.getAllLocs(majorsAvailable, minorsAvailable)))
# remove next scavenger locs before checking if we're stuck
if self.majorsSplit == 'Scavenger':
majorsAvailable = self.filterScavengerLocs(majorsAvailable)
# check if we're stuck
if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
if not isEndPossible:
self.log.debug("STUCK MAJORS and MINORS")
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
                        # we're really stuck
self.log.debug("STUCK CAN'T REWIND")
break
else:
self.log.debug("HARD END 2")
if self.checkMB(mbLoc):
self.log.debug("all end game checks are ok, END")
break
else:
self.log.debug("We're stucked somewhere and can't reach mother brain")
# check if we were able to access MB and kill it.
# we do it before rollbacks to avoid endless rollbacks.
if self.motherBrainCouldBeKilled:
self.log.debug("we're stucked but we could have killed MB before")
self.motherBrainKilled = True
break
else:
                            # we're really stuck, try to rollback
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
self.log.debug("We could end but we're STUCK CAN'T REWIND")
return (-1, False)
# handle no comeback locations
rewindRequired = self.comeBack.handleNoComeBack(self.getAllLocs(majorsAvailable, minorsAvailable),
len(self.collectedItems))
if rewindRequired == True:
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
                    # we're really stuck
self.log.debug("STUCK CAN'T REWIND")
break
# sort them on difficulty and proximity
self.log.debug("getAvailableItemsList majors")
majorsAvailable = self.getAvailableItemsList(majorsAvailable, diffThreshold)
if self.majorsSplit == 'Full':
minorsAvailable = majorsAvailable
else:
self.log.debug("getAvailableItemsList minors")
minorsAvailable = self.getAvailableItemsList(minorsAvailable, diffThreshold)
# choose one to pick up
self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold)
self.comeBack.cleanNoComeBack(self.getAllLocs(self.majorLocations, self.minorLocations))
# compute difficulty value
(difficulty, itemsOk) = self.computeDifficultyValue()
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("difficulty={}".format(difficulty))
self.log.debug("itemsOk={}".format(itemsOk))
self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
self.log.debug("remaining majors:")
for loc in self.majorLocations:
self.log.debug("{} ({})".format(loc.Name, loc.itemName))
self.log.debug("bosses: {}".format([(boss, Bosses.bossDead(self.smbm, boss)) for boss in Bosses.Golden4()]))
return (difficulty, itemsOk)
def haveAllMinorTypes(self):
        # the first minor of each type can be seen as a major, so check for them first before going too far in zebes
hasPB = 'PowerBomb' in self.collectedItems
hasSuper = 'Super' in self.collectedItems
hasMissile = 'Missile' in self.collectedItems
return (hasPB and hasSuper and hasMissile)
def canEndGame(self):
# to finish the game you must:
# - beat golden 4
# - defeat metroids
# - destroy/skip the zebetites
# - beat Mother Brain
return self.smbm.wand(Bosses.allBossesDead(self.smbm), self.smbm.enoughStuffTourian())
def getAllLocs(self, majorsAvailable, minorsAvailable):
if self.majorsSplit == 'Full':
return majorsAvailable
else:
return majorsAvailable+minorsAvailable
def computeDifficultyValue(self):
if not self.canEndGame() or not self.motherBrainKilled:
# we have aborted
return (-1, False)
else:
# return the maximum difficulty
difficultyMax = 0
for loc in self.visitedLocations:
difficultyMax = max(difficultyMax, loc.difficulty.difficulty)
difficulty = difficultyMax
# check if we have taken all the requested items
if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
return (difficulty, True)
else:
# can finish but can't take all the requested items
return (difficulty, False)
def filterScavengerLocs(self, majorsAvailable):
# check where we are in the scavenger hunt
huntInProgress = False
for index, loc in enumerate(self.scavengerOrder):
if loc not in self.visitedLocations:
huntInProgress = True
break
if huntInProgress and index < len(self.scavengerOrder)-1:
self.log.debug("Scavenger hunt in progress, {}/{}".format(index, len(self.scavengerOrder)-1))
# remove all next locs in the hunt
nextHuntLocs = self.scavengerOrder[index+1:]
for loc in nextHuntLocs:
self.log.debug("Scavenger hunt, try to remove loc {}".format(loc.Name))
try:
majorsAvailable.remove(loc)
except:
pass
return majorsAvailable
def scavengerHuntComplete(self):
if self.majorsSplit != 'Scavenger':
return True
else:
# check that last loc from the scavenger hunt list has been visited
lastLoc = self.scavengerOrder[-1]
return lastLoc in self.visitedLocations
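
# Minimal sketch (not part of the solver): the difficulty bucketing that
# getAvailableItemsList() uses above, written as a standalone helper for
# clarity. The bucket names mirror the keys of the `ranged` dict.
def _difficulty_bucket(difficulty):
    if difficulty < medium:
        return "easy"
    elif difficulty < hard:
        return "medium"
    elif difficulty < harder:
        return "hard"
    elif difficulty < hardcore:
        return "harder"
    elif difficulty < mania:
        return "hardcore"
    return "mania"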
| gpl-3.0 | -7,600,679,433,006,749,000 | 47.288486 | 252 | 0.581697 | false |
mitocw/edx-platform | openedx/core/lib/blockstore_api/tests/test_blockstore_api.py | 4 | 8710 | # -*- coding: utf-8 -*-
"""
Tests for xblock_utils.py
"""
import unittest
from uuid import UUID
from django.conf import settings
from openedx.core.lib import blockstore_api as api
# A fake UUID that won't represent any real bundle/draft/collection:
BAD_UUID = UUID('12345678-0000-0000-0000-000000000000')
@unittest.skipUnless(settings.RUN_BLOCKSTORE_TESTS, "Requires a running Blockstore server")
class BlockstoreApiClientTest(unittest.TestCase):
"""
Test for the Blockstore API Client.
The goal of these tests is not to test that Blockstore works correctly, but
that the API client can interact with it and all the API client methods
work.
"""
# Collections
def test_nonexistent_collection(self):
""" Request a collection that doesn't exist -> CollectionNotFound """
with self.assertRaises(api.CollectionNotFound):
api.get_collection(BAD_UUID)
def test_collection_crud(self):
""" Create, Fetch, Update, and Delete a Collection """
title = "Fire 🔥 Collection"
# Create:
coll = api.create_collection(title)
self.assertEqual(coll.title, title)
self.assertIsInstance(coll.uuid, UUID)
# Fetch:
coll2 = api.get_collection(coll.uuid)
self.assertEqual(coll, coll2)
# Update:
new_title = "Air 🌀 Collection"
coll3 = api.update_collection(coll.uuid, title=new_title)
self.assertEqual(coll3.title, new_title)
coll4 = api.get_collection(coll.uuid)
self.assertEqual(coll4.title, new_title)
# Delete:
api.delete_collection(coll.uuid)
with self.assertRaises(api.CollectionNotFound):
api.get_collection(coll.uuid)
# Bundles
def test_nonexistent_bundle(self):
""" Request a bundle that doesn't exist -> BundleNotFound """
with self.assertRaises(api.BundleNotFound):
api.get_bundle(BAD_UUID)
def test_bundle_crud(self):
""" Create, Fetch, Update, and Delete a Bundle """
coll = api.create_collection("Test Collection")
args = {
"title": "Water 💧 Bundle",
"slug": "h2o",
"description": "Sploosh",
}
# Create:
bundle = api.create_bundle(coll.uuid, **args)
for attr, value in args.items():
self.assertEqual(getattr(bundle, attr), value)
self.assertIsInstance(bundle.uuid, UUID)
# Fetch:
bundle2 = api.get_bundle(bundle.uuid)
self.assertEqual(bundle, bundle2)
# Update:
new_description = "Water Nation Bending Lessons"
bundle3 = api.update_bundle(bundle.uuid, description=new_description)
self.assertEqual(bundle3.description, new_description)
bundle4 = api.get_bundle(bundle.uuid)
self.assertEqual(bundle4.description, new_description)
# Delete:
api.delete_bundle(bundle.uuid)
with self.assertRaises(api.BundleNotFound):
api.get_bundle(bundle.uuid)
# Drafts, files, and reading/writing file contents:
def test_nonexistent_draft(self):
""" Request a draft that doesn't exist -> DraftNotFound """
with self.assertRaises(api.DraftNotFound):
api.get_draft(BAD_UUID)
def test_drafts_and_files(self):
"""
Test creating, reading, writing, committing, and reverting drafts and
files.
"""
coll = api.create_collection("Test Collection")
bundle = api.create_bundle(coll.uuid, title="Earth 🗿 Bundle", slug="earth", description="another test bundle")
# Create a draft
draft = api.get_or_create_bundle_draft(bundle.uuid, draft_name="test-draft")
self.assertEqual(draft.bundle_uuid, bundle.uuid)
self.assertEqual(draft.name, "test-draft")
self.assertGreaterEqual(draft.updated_at.year, 2019)
# And retrieve it again:
draft2 = api.get_or_create_bundle_draft(bundle.uuid, draft_name="test-draft")
self.assertEqual(draft, draft2)
# Also test retrieving using get_draft
draft3 = api.get_draft(draft.uuid)
self.assertEqual(draft, draft3)
# Write a file into the bundle:
api.write_draft_file(draft.uuid, "test.txt", b"initial version")
# Now the file should be visible in the draft:
draft_contents = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
self.assertEqual(draft_contents, b"initial version")
api.commit_draft(draft.uuid)
# Write a new version into the draft:
api.write_draft_file(draft.uuid, "test.txt", b"modified version")
published_contents = api.get_bundle_file_data(bundle.uuid, "test.txt")
self.assertEqual(published_contents, b"initial version")
draft_contents2 = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
self.assertEqual(draft_contents2, b"modified version")
# Now delete the draft:
api.delete_draft(draft.uuid)
draft_contents3 = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
# Confirm the file is now reset:
self.assertEqual(draft_contents3, b"initial version")
        # Finally, test the get_bundle_file* methods:
file_info1 = api.get_bundle_file_metadata(bundle.uuid, "test.txt")
self.assertEqual(file_info1.path, "test.txt")
self.assertEqual(file_info1.size, len(b"initial version"))
self.assertEqual(file_info1.hash_digest, "a45a5c6716276a66c4005534a51453ab16ea63c4")
self.assertEqual(list(api.get_bundle_files(bundle.uuid)), [file_info1])
self.assertEqual(api.get_bundle_files_dict(bundle.uuid), {
"test.txt": file_info1,
})
# Links
def test_links(self):
"""
Test operations involving bundle links.
"""
coll = api.create_collection("Test Collection")
# Create two library bundles and a course bundle:
lib1_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="lib1")
lib1_draft = api.get_or_create_bundle_draft(lib1_bundle.uuid, draft_name="test-draft")
lib2_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="lib2")
lib2_draft = api.get_or_create_bundle_draft(lib2_bundle.uuid, draft_name="other-draft")
course_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="course")
course_draft = api.get_or_create_bundle_draft(course_bundle.uuid, draft_name="test-draft")
# To create links, we need valid BundleVersions, which requires having committed at least one change:
api.write_draft_file(lib1_draft.uuid, "lib1-data.txt", "hello world")
api.commit_draft(lib1_draft.uuid) # Creates version 1
api.write_draft_file(lib2_draft.uuid, "lib2-data.txt", "hello world")
api.commit_draft(lib2_draft.uuid) # Creates version 1
# Lib2 has no links:
self.assertFalse(api.get_bundle_links(lib2_bundle.uuid))
# Create a link from lib2 to lib1
link1_name = "lib2_to_lib1"
api.set_draft_link(lib2_draft.uuid, link1_name, lib1_bundle.uuid, version=1)
# Now confirm the link exists in the draft:
lib2_draft_links = api.get_bundle_links(lib2_bundle.uuid, use_draft=lib2_draft.name)
self.assertIn(link1_name, lib2_draft_links)
self.assertEqual(lib2_draft_links[link1_name].direct.bundle_uuid, lib1_bundle.uuid)
self.assertEqual(lib2_draft_links[link1_name].direct.version, 1)
# Now commit the change to lib2:
api.commit_draft(lib2_draft.uuid) # Creates version 2
# Now create a link from course to lib2
link2_name = "course_to_lib2"
api.set_draft_link(course_draft.uuid, link2_name, lib2_bundle.uuid, version=2)
api.commit_draft(course_draft.uuid)
# And confirm the link exists in the resulting bundle version:
course_links = api.get_bundle_links(course_bundle.uuid)
self.assertIn(link2_name, course_links)
self.assertEqual(course_links[link2_name].direct.bundle_uuid, lib2_bundle.uuid)
self.assertEqual(course_links[link2_name].direct.version, 2)
# And since the links go course->lib2->lib1, course has an indirect link to lib1:
self.assertEqual(course_links[link2_name].indirect[0].bundle_uuid, lib1_bundle.uuid)
self.assertEqual(course_links[link2_name].indirect[0].version, 1)
# Finally, test deleting a link from course's draft:
api.set_draft_link(course_draft.uuid, link2_name, None, None)
self.assertFalse(api.get_bundle_links(course_bundle.uuid, use_draft=course_draft.name))
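
# Condensed sketch (not part of the test suite) of the draft workflow exercised
# in test_drafts_and_files above; it reuses only API calls that appear there.
def _draft_workflow_sketch(bundle_uuid):
    draft = api.get_or_create_bundle_draft(bundle_uuid, draft_name="sketch-draft")
    api.write_draft_file(draft.uuid, "sketch.txt", b"v1")
    api.commit_draft(draft.uuid)  # publish the first version
    api.write_draft_file(draft.uuid, "sketch.txt", b"v2")  # modify the draft again
    api.delete_draft(draft.uuid)  # discard the unpublished change
    return api.get_bundle_file_data(bundle_uuid, "sketch.txt")  # published data: b"v1"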
| agpl-3.0 | -8,308,587,071,712,730,000 | 43.605128 | 118 | 0.656703 | false |
OSURoboticsClub/underwater | manual-control/test.py | 1 | 1354 | #!/usr/bin/env python2
import ctypes
from ctypes import byref
from time import sleep
class SensorData(ctypes.Structure):
_fields_ = (
('a', ctypes.c_uint16),
('b', ctypes.c_uint16),
('c', ctypes.c_uint8),
('d', ctypes.c_double),
('e', ctypes.c_uint32),
)
class ThrusterData(ctypes.Structure):
_fields_ = (
('ls', ctypes.c_uint8),
('rs', ctypes.c_uint8),
('fl', ctypes.c_uint16),
('fr', ctypes.c_uint16),
('bl', ctypes.c_uint16),
('br', ctypes.c_uint16),
)
class Robot(ctypes.Structure):
_fields_ = (
('state', ctypes.c_void_p),
)
w = ctypes.cdll.LoadLibrary('./libworker.so')
w.init.argtypes = ()
w.init.restype = Robot
w.wait_for_tick.argtypes = (ctypes.POINTER(Robot),)
w.wait_for_tick.restype = SensorData
w.set_thruster_data.argtypes = (
ctypes.POINTER(Robot), ctypes.POINTER(ThrusterData))
w.set_thruster_data.restype = None
robot = w.init()
thruster_data = ThrusterData(20, 20, 800, 300, 800, 300)
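# Illustrative note: these ctypes fields wrap on overflow (c_uint8 rolls over from
# 255 to 0, c_uint16 from 65535 to 0), so the unbounded increments in the loop
# below wrap rather than raise. A clamped update could look like the helper here;
# it is a sketch and is not wired into the loop.
def clamp_u8(value):
    """Clamp a speed value into the range a c_uint8 field can represent."""
    return max(0, min(255, value))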
while True:
    sensor_data = w.wait_for_tick(byref(robot))  # use the prototype declared above
thruster_data.ls += 1
thruster_data.rs += 1
thruster_data.fl += 1
thruster_data.fr += 1
thruster_data.bl += 1
thruster_data.br += 1
sleep(.3)
w.set_thruster_data(byref(robot), byref(thruster_data))
print
| mit | -7,649,775,147,343,717,000 | 19.830769 | 59 | 0.599705 | false |
partofthething/home-assistant | homeassistant/components/soma/config_flow.py | 22 | 2346 | """Config flow for Soma."""
import logging
from api.soma_api import SomaApi
from requests import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 3000
class SomaFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Instantiate config flow."""
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
if user_input is None:
data = {
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
return self.async_show_form(step_id="user", data_schema=vol.Schema(data))
return await self.async_step_creation(user_input)
async def async_step_creation(self, user_input=None):
"""Finish config flow."""
api = SomaApi(user_input["host"], user_input["port"])
try:
result = await self.hass.async_add_executor_job(api.list_devices)
_LOGGER.info("Successfully set up Soma Connect")
if result["result"] == "success":
return self.async_create_entry(
title="Soma Connect",
data={"host": user_input["host"], "port": user_input["port"]},
)
_LOGGER.error(
"Connection to SOMA Connect failed (result:%s)", result["result"]
)
return self.async_abort(reason="result_error")
except RequestException:
_LOGGER.error("Connection to SOMA Connect failed with RequestException")
return self.async_abort(reason="connection_error")
except KeyError:
_LOGGER.error("Connection to SOMA Connect failed with KeyError")
return self.async_abort(reason="connection_error")
async def async_step_import(self, user_input=None):
"""Handle flow start from existing config section."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_setup")
return await self.async_step_creation(user_input)
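    # Illustrative note: both the user step and the import step feed
    # async_step_creation() a mapping of this shape; the host below is a
    # hypothetical example value.
    _EXAMPLE_USER_INPUT = {CONF_HOST: "192.168.0.10", CONF_PORT: DEFAULT_PORT}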
| mit | 8,436,094,702,955,953,000 | 35.092308 | 85 | 0.622336 | false |
gcode-mirror/audacity | lib-src/lv2/lv2/plugins/eg03-metro.lv2/waflib/Scripting.py | 85 | 10612 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
Logs.init_log()
if Context.WAFVERSION!=version:
Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
sys.exit(1)
if'--version'in sys.argv:
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Context.waf_dir=wafdir
Context.launch_dir=current_directory
no_climb=os.environ.get('NOCLIMB',None)
if not no_climb:
for k in no_climb_commands:
if k in sys.argv:
no_climb=True
break
cur=current_directory
while cur:
lst=os.listdir(cur)
if Options.lockfile in lst:
env=ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur,Options.lockfile))
ino=os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
for x in[env.run_dir,env.top_dir,env.out_dir]:
if Utils.is_win32:
if cur==x:
load=True
break
else:
try:
ino2=os.stat(x)[stat.ST_INO]
except OSError:
pass
else:
if ino==ino2:
load=True
break
else:
Logs.warn('invalid lock file in %s'%cur)
load=False
if load:
Context.run_dir=env.run_dir
Context.top_dir=env.top_dir
Context.out_dir=env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir=cur
next=os.path.dirname(cur)
if next==cur:
break
cur=next
if no_climb:
break
if not Context.run_dir:
if'-h'in sys.argv or'--help'in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
sys.exit(1)
try:
set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
except Errors.WafError ,e:
Logs.pprint('RED',e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception ,e:
Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
try:
run_commands()
except Errors.WafError ,e:
if Logs.verbose>1:
Logs.pprint('RED',e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except SystemExit:
raise
except Exception ,e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED','Interrupted')
sys.exit(68)
def set_main_module(file_path):
Context.g_module=Context.load_module(file_path)
Context.g_module.root_path=file_path
def set_def(obj):
name=obj.__name__
if not name in Context.g_module.__dict__:
setattr(Context.g_module,name,obj)
for k in[update,dist,distclean,distcheck,update]:
set_def(k)
if not'init'in Context.g_module.__dict__:
Context.g_module.init=Utils.nada
if not'shutdown'in Context.g_module.__dict__:
Context.g_module.shutdown=Utils.nada
if not'options'in Context.g_module.__dict__:
Context.g_module.options=Utils.nada
def parse_options():
Context.create_context('options').execute()
if not Options.commands:
Options.commands=[default_cmd]
Options.commands=[x for x in Options.commands if x!='options']
Logs.verbose=Options.options.verbose
Logs.init_log()
if Options.options.zones:
Logs.zones=Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
def run_command(cmd_name):
ctx=Context.create_context(cmd_name)
ctx.log_timer=Utils.Timer()
ctx.options=Options.options
ctx.cmd=cmd_name
ctx.execute()
return ctx
def run_commands():
parse_options()
run_command('init')
while Options.commands:
cmd_name=Options.commands.pop(0)
ctx=run_command(cmd_name)
Logs.info('%r finished successfully (%s)'%(cmd_name,str(ctx.log_timer)))
run_command('shutdown')
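def _example_command_dispatch():
	"""Illustrative sketch (not part of waf itself): a plain `waf configure build`
	invocation maps onto the helpers above roughly like this."""
	parse_options()
	run_command('init')
	for name in ('configure', 'build'):
		run_command(name)
	run_command('shutdown')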
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
for(root,dirs,files)in os.walk(dirname):
for f in files:
if _can_distclean(f):
fname=root+os.sep+f
try:
os.remove(fname)
except OSError:
Logs.warn('Could not remove %r'%fname)
for x in[Context.DBFILE,'config.log']:
try:
os.remove(x)
except OSError:
pass
try:
shutil.rmtree('c4che')
except OSError:
pass
def distclean(ctx):
'''removes the build directory'''
lst=os.listdir('.')
for f in lst:
if f==Options.lockfile:
try:
proj=ConfigSet.ConfigSet(f)
except IOError:
Logs.warn('Could not read %r'%f)
continue
if proj['out_dir']!=proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('project %r cannot be removed'%proj[Context.OUT])
else:
distclean_dir(proj['out_dir'])
for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
try:
os.remove(os.path.join(k,Options.lockfile))
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('file %r cannot be removed'%f)
if f.startswith('.waf')and not Options.commands:
shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
'''creates an archive containing the project source code'''
cmd='dist'
fun='dist'
algo='tar.bz2'
ext_algo={}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name=self.get_arch_name()
try:
self.base_path
except AttributeError:
self.base_path=self.path
node=self.base_path.make_node(arch_name)
try:
node.delete()
except Exception:
pass
files=self.get_files()
if self.algo.startswith('tar.'):
tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
for x in files:
self.add_tar_file(x,tar)
tar.close()
elif self.algo=='zip':
import zipfile
zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest=" (sha=%r)"%sha(node.read()).hexdigest()
except Exception:
digest=''
Logs.info('New archive created: %s%s'%(self.arch_name,digest))
def get_tar_path(self,node):
return node.abspath()
def add_tar_file(self,x,tar):
p=self.get_tar_path(x)
tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
tinfo.uid=0
tinfo.gid=0
tinfo.uname='root'
tinfo.gname='root'
fu=None
try:
fu=open(p,'rb')
tar.addfile(tinfo,fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except AttributeError:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except AttributeError:
self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except AttributeError:
appname=getattr(Context.g_module,Context.APPNAME,'noname')
version=getattr(Context.g_module,Context.VERSION,'1.0')
self.base_name=appname+'-'+version
return self.base_name
def get_excl(self):
try:
return self.excl
except AttributeError:
self.excl=Node.exclude_regs+' **/waf-1.7.* **/.waf-1.7* **/waf3-1.7.* **/.waf3-1.7* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
nd=self.root.find_node(Context.out_dir)
if nd:
self.excl+=' '+nd.path_from(self.base_path)
return self.excl
def get_files(self):
try:
files=self.files
except AttributeError:
files=self.base_path.ant_glob('**/*',excl=self.get_excl())
return files
def dist(ctx):
'''makes a tarball for redistributing the sources'''
pass
class DistCheck(Dist):
fun='distcheck'
cmd='distcheck'
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
self.check()
def check(self):
import tempfile,tarfile
t=None
try:
t=tarfile.open(self.get_arch_name())
for x in t:
t.extract(x)
finally:
if t:
t.close()
cfg=[]
if Options.options.distcheck_args:
cfg=shlex.split(Options.options.distcheck_args)
else:
cfg=[x for x in sys.argv if x.startswith('-')]
instdir=tempfile.mkdtemp('.inst',self.get_base_name())
ret=Utils.subprocess.Popen([sys.executable,sys.argv[0],'configure','install','uninstall','--destdir='+instdir]+cfg,cwd=self.get_base_name()).wait()
if ret:
raise Errors.WafError('distcheck failed with code %i'%ret)
if os.path.exists(instdir):
raise Errors.WafError('distcheck succeeded, but files were left in %s'%instdir)
shutil.rmtree(self.get_base_name())
def distcheck(ctx):
'''checks if the project compiles (tarball from 'dist')'''
pass
def update(ctx):
'''updates the plugins from the *waflib/extras* directory'''
lst=Options.options.files.split(',')
if not lst:
lst=[x for x in Utils.listdir(Context.waf_dir+'/waflib/extras')if x.endswith('.py')]
for x in lst:
tool=x.replace('.py','')
try:
Configure.download_tool(tool,force=True,ctx=ctx)
except Errors.WafError:
Logs.error('Could not find the tool %s in the remote repository'%x)
def autoconfigure(execute_method):
def execute(self):
if not Configure.autoconfig:
return execute_method(self)
env=ConfigSet.ConfigSet()
do_config=False
try:
env.load(os.path.join(Context.top_dir,Options.lockfile))
except Exception:
Logs.warn('Configuring the project')
do_config=True
else:
if env.run_dir!=Context.run_dir:
do_config=True
else:
h=0
for f in env['files']:
h=hash((h,Utils.readf(f,'rb')))
do_config=h!=env.hash
if do_config:
Options.commands.insert(0,self.cmd)
Options.commands.insert(0,'configure')
return
return execute_method(self)
return execute
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
| gpl-2.0 | 7,855,113,038,600,285,000 | 27.450402 | 160 | 0.684979 | false |
thatguystone/vault | vault/util.py | 1 | 1230 | import logging
import os
import sys
import subprocess
import time
_suffixes = [
[("k", "kb"), 0],
[("m", "mb"), 0],
[("g", "gb"), 0],
[("t", "tb"), 0],
]
log = logging.getLogger(__name__)
for i, s in enumerate(_suffixes):
if i == 0:
s[1] = 1024
else:
s[1] = _suffixes[i-1][1] * 1024
def human_size(size):
if isinstance(size, str):
size = size.lower()
for s in _suffixes:
for e in s[0]:
if size.endswith(e):
size = size[:-len(e)]
return abs(int(size)) * s[1]
return int(size)
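def _human_size_examples():
    """Illustrative only: expected conversions for human_size above (1024-based)."""
    assert human_size("4k") == 4 * 1024
    assert human_size("2gb") == 2 * 1024 ** 3
    assert human_size(512) == 512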
def to_bytes(obj):
if obj and isinstance(obj, str):
obj = obj.encode("utf-8")
return obj
def _proc_name(args):
if len(args) == 0:
return "<unknown>"
return args[0]
def run(*args, stdin=None, cwd=None):
start = time.monotonic()
try:
p = subprocess.Popen(args,
stdin=sys.stdin if not stdin else subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
cwd=cwd)
out = p.communicate(input=to_bytes(stdin))[0].decode("utf-8")
finally:
log.debug("exec (took %fs): %s", time.monotonic() - start, args)
if p.returncode != 0:
raise RuntimeError("failed to execute {proc}, process said: {out}".format(
proc=_proc_name(args),
out=out.strip()))
return out
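def _run_example():
    """Illustrative only: capture the combined stdout/stderr of a short command.
    The command and directory are example values."""
    return run("ls", "-l", cwd="/tmp")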
| mit | -874,061,750,009,920,000 | 19.163934 | 76 | 0.619512 | false |
cmauec/Cloud-Vision-Api | oauth2client/anyjson/simplejson/tests/test_for_json.py | 143 | 2767 | import unittest
import simplejson as json
class ForJson(object):
def for_json(self):
return {'for_json': 1}
class NestedForJson(object):
def for_json(self):
return {'nested': ForJson()}
class ForJsonList(object):
def for_json(self):
return ['list']
class DictForJson(dict):
def for_json(self):
return {'alpha': 1}
class ListForJson(list):
def for_json(self):
return ['list']
class TestForJson(unittest.TestCase):
def assertRoundTrip(self, obj, other, for_json=True):
if for_json is None:
# None will use the default
s = json.dumps(obj)
else:
s = json.dumps(obj, for_json=for_json)
self.assertEqual(
json.loads(s),
other)
def test_for_json_encodes_stand_alone_object(self):
self.assertRoundTrip(
ForJson(),
ForJson().for_json())
def test_for_json_encodes_object_nested_in_dict(self):
self.assertRoundTrip(
{'hooray': ForJson()},
{'hooray': ForJson().for_json()})
def test_for_json_encodes_object_nested_in_list_within_dict(self):
self.assertRoundTrip(
{'list': [0, ForJson(), 2, 3]},
{'list': [0, ForJson().for_json(), 2, 3]})
def test_for_json_encodes_object_nested_within_object(self):
self.assertRoundTrip(
NestedForJson(),
{'nested': {'for_json': 1}})
def test_for_json_encodes_list(self):
self.assertRoundTrip(
ForJsonList(),
ForJsonList().for_json())
def test_for_json_encodes_list_within_object(self):
self.assertRoundTrip(
{'nested': ForJsonList()},
{'nested': ForJsonList().for_json()})
def test_for_json_encodes_dict_subclass(self):
self.assertRoundTrip(
DictForJson(a=1),
DictForJson(a=1).for_json())
def test_for_json_encodes_list_subclass(self):
self.assertRoundTrip(
ListForJson(['l']),
ListForJson(['l']).for_json())
def test_for_json_ignored_if_not_true_with_dict_subclass(self):
for for_json in (None, False):
self.assertRoundTrip(
DictForJson(a=1),
{'a': 1},
for_json=for_json)
def test_for_json_ignored_if_not_true_with_list_subclass(self):
for for_json in (None, False):
self.assertRoundTrip(
ListForJson(['l']),
['l'],
for_json=for_json)
def test_raises_typeerror_if_for_json_not_true_with_object(self):
self.assertRaises(TypeError, json.dumps, ForJson())
self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False)
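    def test_for_json_example_output(self):
        # Illustrative case mirroring the suite above: with for_json=True,
        # dumps() substitutes the object's for_json() return value.
        self.assertEqual(
            json.loads(json.dumps({'obj': ForJson()}, for_json=True)),
            {'obj': {'for_json': 1}})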
| gpl-2.0 | -3,713,522,146,700,502,500 | 27.525773 | 75 | 0.568124 | false |
sintefmath/Splipy | splipy/io/grdecl.py | 1 | 14711 | import numpy as np
from itertools import product, chain
from splipy import Surface, Volume, SplineObject, BSplineBasis
from splipy import surface_factory, volume_factory, curve_factory
from splipy.io import G2
from splipy.utils import ensure_listlike
from .master import MasterIO
import re
import warnings
from scipy.spatial import Delaunay
from scipy.spatial.qhull import QhullError
from tqdm import tqdm
import cv2
import h5py
class Box(object):
def __init__(self, x):
self.x = x
class DiscontBoxMesh(object):
def __init__(self, n, coord, zcorn):
nx, ny, nz = n
X = np.empty(n + 1, dtype=object)
Xz = np.zeros((nx + 1, ny + 1, 2 * nz, 3))
cells = np.empty(n, dtype=object)
for i, j, k in product(range(nx), range(ny), range(nz)):
x = []
for k0, j0, i0 in product(range(2), repeat=3):
# Interpolate to find the x,y values of this point
zmin, zmax = coord[i+i0, j+j0, :, 2]
z = zcorn[2*i+i0, 2*j+j0, 2*k+k0]
t = (z - zmax) / (zmin - zmax)
point = coord[i+i0, j+j0, 0] * t + coord[i+i0, j+j0, 1] * (1 - t)
x.append(point)
if X[i+i0,j+j0,k+k0] is None:
X[i+i0,j+j0,k+k0] = [point]
else:
X[i+i0,j+j0,k+k0].append(point)
Xz[i+i0,j+j0,2*k+k0,:] = point
cells[i,j,k] = Box(x)
self.X = X
self.Xz = Xz
self.n = n
def hull_or_none(x):
try:
return Delaunay(x)
except QhullError:
return None
self.plane_hull = np.array([
[Delaunay(np.reshape(coord[i:i+2, j:j+2, :, :], (8,3))) for j in range(ny)]
for i in range(nx)
], dtype=object)
self.hull = np.array([
[[hull_or_none(cell.x) for cell in cell_tower] for cell_tower in cells_tmp]
for cells_tmp in cells
], dtype=object)
def cell_at(self, x, guess=None):
# First, find the 'tower' containing x
check = -1
last_i = last_j = 0
numb_hits = []
if guess is not None:
i, j, _ = guess
check = self.plane_hull[i,j].find_simplex(x)
# if check > -1: print('correct tower!')
if check >= 0:
numb_hits += [(i,j)]
last_i = i
last_j = j
check = -1
if check == -1:
for (i, j), hull in np.ndenumerate(self.plane_hull):
check = hull.find_simplex(x)
if check >= 0:
numb_hits += [(i,j)]
last_i = i
last_j = j
i,j = last_i,last_j
# if len(numb_hits) != 1:
# print(numb_hits)
# print(x)
# print(guess)
# print(check)
# assert check >= 0
assert len(numb_hits) >= 1
# Find the correct cell in the 'tower'
check = -1
if guess is not None:
_, _, k = guess
check = self.hull[i,j,k].find_simplex(x)
# if check > -1: print('correct cell!')
if check == -1:
for (i,j) in numb_hits:
for k, hull in enumerate(self.hull[i,j,:]):
if hull is None: continue
check = hull.find_simplex(x)
if check >= 0: break
if check >= 0: break
if check < 0:
print(numb_hits)
print(x)
print(guess)
print(check)
# print(f'Returns {i} {j} {k} : {check}')
assert check >= 0
return i, j, k
def get_c0_avg(self):
"""Compute best-approximation vertices for a continuous mesh by averaging the location of all
corners that 'should' coincide.
"""
return np.array([[[np.mean(k,axis=0) for k in j] for j in i] for i in self.X])
def get_discontinuous_all(self):
"""Return a list of vertices suitable for a fully discontinuous mesh."""
return list(chain.from_iterable(xs[::-1] for xs in self.X.T.flat))
def get_discontinuous_z(self):
"""Return a list of vertices suitable for a mixed continuity mesh."""
return self.Xz
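    # Shape summary (illustrative note, derived from __init__ above):
    #   get_c0_avg()            -> (nx+1, ny+1, nz+1, 3) averaged corner points
    #   get_discontinuous_all() -> flat list of per-corner points, fully discontinuous
    #   get_discontinuous_z()   -> (nx+1, ny+1, 2*nz, 3) points, discontinuous in z only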
class GRDECL(MasterIO):
def __init__(self, filename):
if not filename.endswith('.grdecl'):
filename += '.grdecl'
self.filename = filename
self.attribute = {}
def __enter__(self):
self.fstream = open(self.filename, 'r')
self.line_number = 0
return self
def read_specgrid(self):
args = next(self.fstream).strip().split()
return np.array(args[:3], dtype=np.int32)
def read_coord(self):
nx, ny = self.n[:2]
ans = np.zeros((nx + 1, ny + 1, 2, 3))
for j, i in product(range(ny+1), range(nx+1)):
args = next(self.fstream).split()
ans[i,j,0,:] = np.array(args[:3], dtype=np.float64)
ans[i,j,1,:] = np.array(args[3:], dtype=np.float64)
return ans
def read_zcorn(self):
ntot = np.prod(self.n)*8
numbers = []
while len(numbers) < ntot:
numbers += next(self.fstream).split()
numbers = numbers[:ntot] # strip away any '/' characters at the end of the line
return np.reshape(np.array(numbers, dtype=np.float64), self.n*2, order='F')
def cell_property(self, dtype=np.float64):
ntot = np.prod(self.n)
numbers = []
while len(numbers) < ntot:
numbers += next(self.fstream).split()
numbers = numbers[:ntot] # strip away any '/' characters at the end of the line
return np.array(numbers, dtype=dtype)
def read(self):
for line in self.fstream:
line = line.strip().lower()
if line == 'specgrid':
self.n = self.read_specgrid()
elif line == 'coord':
self.coord = self.read_coord()
elif line == 'zcorn':
self.zcorn = self.read_zcorn()
elif line in {'actnum', 'permx', 'permy', 'permz', 'poro', 'satnum', 'rho', 'kx', 'kz', 'emodulus25', 'poissonratio25', 'pressure', }:
dtype = np.int32 if line in {'actnum', 'satnum'} else np.float64
self.attribute[line] = self.cell_property(dtype)
elif line in {'grid', '/', ''} or line.startswith('--'):
pass
elif not re.match('[0-9]', line[0]):
warnings.showwarning(
'Unkown keyword "{}" encountered in file'.format(line.split()[0]),
SyntaxWarning, self.filename, self.line_number, line=[],
)
else:
pass # silently skip large number blocks
self.raw = DiscontBoxMesh(self.n, self.coord, self.zcorn)
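    # Expected keyword layout (illustrative note matching the readers above; values
    # are whitespace separated and a trailing '/' terminates a number block):
    #   SPECGRID -> one line with the cell counts "nx ny nz ..."
    #   COORD    -> (nx+1)*(ny+1) pillar lines, each "x_top y_top z_top x_bot y_bot z_bot"
    #   ZCORN    -> 8*nx*ny*nz corner depths spread over any number of lines
    #   ACTNUM / PORO / PERM* / ... -> nx*ny*nz cell values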
def get_c0_mesh(self):
# Create the C0-mesh
nx, ny, nz = self.n
X = self.raw.get_c0_avg()
b1 = BSplineBasis(2, [0] + [i/nx for i in range(nx+1)] + [1])
b2 = BSplineBasis(2, [0] + [i/ny for i in range(ny+1)] + [1])
b3 = BSplineBasis(2, [0] + [i/nz for i in range(nz+1)] + [1])
c0_vol = volume_factory.interpolate(X, [b1, b2, b3])
return c0_vol
def get_cm1_mesh(self):
# Create the C^{-1} mesh
nx, ny, nz = self.n
Xm1 = self.raw.get_discontinuous_all()
b1 = BSplineBasis(2, sorted(list(range(self.n[0]+1))*2))
b2 = BSplineBasis(2, sorted(list(range(self.n[1]+1))*2))
b3 = BSplineBasis(2, sorted(list(range(self.n[2]+1))*2))
discont_vol = Volume(b1, b2, b3, Xm1)
return discont_vol
def get_mixed_cont_mesh(self):
# Create mixed discontinuity mesh: C^0, C^0, C^{-1}
nx, ny, nz = self.n
Xz = self.raw.get_discontinuous_z()
b1 = BSplineBasis(2, sorted(list(range(self.n[0]+1))+[0,self.n[0]]))
b2 = BSplineBasis(2, sorted(list(range(self.n[1]+1))+[0,self.n[1]]))
b3 = BSplineBasis(2, sorted(list(range(self.n[2]+1))*2))
true_vol = Volume(b1, b2, b3, Xz, raw=True)
return true_vol
def texture(self, p, ngeom, ntexture, method='full', irange=[None,None], jrange=[None,None]):
# Set the dimensions of geometry and texture map
# ngeom = np.floor(self.n / (p-1))
# ntexture = np.floor(self.n * n)
# ngeom = ngeom.astype(np.int32)
# ntexture = ntexture.astype(np.int32)
ngeom = ensure_listlike(ngeom, 3)
ntexture = ensure_listlike(ntexture, 3)
p = ensure_listlike(p, 3)
# Create the geometry
ngx, ngy, ngz = ngeom
b1 = BSplineBasis(p[0], [0]*(p[0]-1) + [i/ngx for i in range(ngx+1)] + [1]*(p[0]-1))
b2 = BSplineBasis(p[1], [0]*(p[1]-1) + [i/ngy for i in range(ngy+1)] + [1]*(p[1]-1))
b3 = BSplineBasis(p[2], [0]*(p[2]-1) + [i/ngz for i in range(ngz+1)] + [1]*(p[2]-1))
l2_fit = surface_factory.least_square_fit
vol = self.get_c0_mesh()
i = slice(irange[0], irange[1], None)
j = slice(jrange[0], jrange[1], None)
# special case number of evaluation points for full domain
if irange[1] == None: irange[1] = vol.shape[0]
if jrange[1] == None: jrange[1] = vol.shape[1]
if irange[0] == None: irange[0] = 0
if jrange[0] == None: jrange[0] = 0
nu = np.diff(irange)
nv = np.diff(jrange)
nw = vol.shape[2]
u = np.linspace(0, 1, nu)
v = np.linspace(0, 1, nv)
w = np.linspace(0, 1, nw)
crvs = []
crvs.append(curve_factory.polygon(vol[i ,jrange[0] , 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[i ,jrange[0] ,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[i ,jrange[1]-1, 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[i ,jrange[1]-1,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,j , 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,j ,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,j , 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,j ,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,jrange[0] , :,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,jrange[1]-1, :,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,jrange[0] , :,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,jrange[1]-1, :,:].squeeze()))
# with G2('curves.g2') as myfile:
# myfile.write(crvs)
# print('Written curve.g2')
if method == 'full':
bottom = l2_fit(vol[i, j, 0,:].squeeze(), [b1, b2], [u, v])
top = l2_fit(vol[i, j, -1,:].squeeze(), [b1, b2], [u, v])
left = l2_fit(vol[irange[0] ,j, :,:].squeeze(), [b2, b3], [v, w])
right = l2_fit(vol[irange[1]-1,j, :,:].squeeze(), [b2, b3], [v, w])
front = l2_fit(vol[i, jrange[0], :,:].squeeze(), [b1, b3], [u, w])
back = l2_fit(vol[i, jrange[1]-1,:,:].squeeze(), [b1, b3], [u, w])
volume = volume_factory.edge_surfaces([left, right, front, back, bottom, top])
elif method == 'z':
bottom = l2_fit(vol[i,j, 0,:].squeeze(), [b1, b2], [u, v])
top = l2_fit(vol[i,j,-1,:].squeeze(), [b1, b2], [u, v])
volume = volume_factory.edge_surfaces([bottom, top])
volume.set_order(*p)
volume.refine(ngz - 1, direction='w')
volume.reverse(direction=2)
# Point-to-cell mapping
# TODO: Optimize more
eps = 1e-2
u = [np.linspace(eps, 1-eps, n) for n in ntexture]
points = volume(*u).reshape(-1, 3)
cellids = np.zeros(points.shape[:-1], dtype=int)
cell = None
nx, ny, nz = self.n
for ptid, point in enumerate(tqdm(points, desc='Inverse mapping')):
i, j, k = cell = self.raw.cell_at(point) # , guess=cell)
cellid = i*ny*nz + j*nz + k
cellids[ptid] = cellid
cellids = cellids.reshape(tuple(ntexture))
all_textures = {}
for name in self.attribute:
data = self.attribute[name][cellids]
# TODO: This flattens the image if it happens to be 3D (or higher...)
# do we need a way to communicate the structure back to the caller?
# data = data.reshape(-1, data.shape[-1])
# TODO: This normalizes the image,
# but we need a way to communicate the ranges back to the caller
# a, b = min(data.flat), max(data.flat)
# data = ((data - a) / (b - a) * 255).astype(np.uint8)
all_textures[name] = data
all_textures['cellids'] = cellids
return volume, all_textures
def to_ifem(self, p, ngeom, ntexture, method='full', irange=[None,None], jrange=[None,None]):
translate = {
'emodulus25' : 'stiffness',
'kx' : 'permx',
'ky' : 'permy',
'kz' : 'permz',
'poissonratio25': 'poisson'}
h5_filename = 'textures.h5'
h5_file = h5py.File(h5_filename, 'w')
vol, textures = self.texture(p, ngeom, ntexture, method, irange, jrange)
# augment dataset with missing information
if 'kx' in textures and not 'ky' in textures:
textures['ky'] = textures['kx']
# print information to png-images and hdf5-files
print(r'<porotexturematerial>')
for name, data in textures.items():
# translate to more IFEM-friendly terminology
if name in translate: name = translate[name]
h5_file.create_dataset(name, data=data, compression='gzip')
a, b = min(data.flat), max(data.flat)
img = ((data - a) / (b - a) * 255).astype(np.uint8)
n = data.shape
img = img.reshape(n[0], n[1]*n[2])
print(' <property file="{}.png" min="{}" max="{}" name="{}" nx="{}" ny="{}" nz="{}"/>'.format(name, a,b, name, n[0], n[1], n[2]))
cv2.imwrite(name+'.png', img)
print(r'</porotexturematerial>')
h5_file.close()
print('Written {}'.format(h5_filename))
with G2('geom.g2') as myfile:
myfile.write(vol)
def __exit__(self, exc_type, exc_value, traceback):
self.fstream.close()
| gpl-3.0 | 6,873,524,519,651,488,000 | 37.713158 | 146 | 0.515737 | false |
rienafairefr/pynYNAB | tests/test_operations.py | 2 | 2203 | import json
import pytest
from pynYNAB.Client import nYnabClient
from pynYNAB.ClientFactory import nYnabClientFactory
from pynYNAB.exceptions import NoBudgetNameException
from pynYNAB.schema.catalog import BudgetVersion
class MockConnection2(object):
id = '12345'
@pytest.fixture
def factory():
return nYnabClientFactory('sqlite://')
@pytest.fixture
def connection():
return MockConnection2()
@pytest.fixture
def client(factory, connection):
return factory.create_client(budget_name='budget_name', connection=connection, sync=False)
def test_create_budget(factory):
currency_format = dict(
iso_code='USD',
example_format='123,456.78',
decimal_digits=2,
decimal_separator='.',
symbol_first=True,
group_separator=',',
currency_symbol='$',
display_symbol=True
)
date_format = dict(
format='MM/DD/YYYY'
)
class MockConnection(object):
def dorequest(this, request_dic, opname):
assert opname == opname
assert request_dic['currency_format'] == json.dumps(currency_format)
assert request_dic['date_format'] == json.dumps(date_format)
user_id = '1234'
id = '1234'
client = factory.create_client(budget_name='budget_name', connection=MockConnection(), sync=False)
client.create_budget(budget_name='New Budget')
def test_client_nobudget():
def create_client_no_budget():
nYnabClient()
pytest.raises(NoBudgetNameException, create_client_no_budget)
def test_select_budget(client):
budget_version1 = BudgetVersion(version_name='TestBudget')
budget_version2 = BudgetVersion(version_name='NewTestBudget')
client.catalog.ce_budget_versions = [budget_version1, budget_version2]
client.select_budget(budget_name='NewTestBudget')
assert budget_version2.id == client.budget_version_id
def test_create_client(client, connection):
assert connection == client.connection
assert connection == client.catalogClient.connection
assert connection == client.budgetClient.connection
assert 'budget_name' == client.budget_name
assert 'sqlite://' == str(client.session.bind.url)
| mit | -7,912,482,156,133,166,000 | 26.5375 | 102 | 0.696323 | false |
lildadou/Flexget | flexget/utils/qualities.py | 14 | 17358 | from __future__ import unicode_literals, division, absolute_import
import re
import copy
import logging
log = logging.getLogger('utils.qualities')
class QualityComponent(object):
""""""
def __init__(self, type, value, name, regexp=None, modifier=None, defaults=None):
"""
:param type: Type of quality component. (resolution, source, codec, or audio)
:param value: Value used to sort this component with others of like type.
:param name: Canonical name for this quality component.
:param regexp: Regexps used to match this component.
:param modifier: An integer that affects sorting above all other components.
:param defaults: An iterable defining defaults for other quality components if this component matches.
"""
if type not in ['resolution', 'source', 'codec', 'audio']:
raise ValueError('%s is not a valid quality component type.' % type)
self.type = type
self.value = value
self.name = name
self.modifier = modifier
self.defaults = defaults or []
# compile regexp
if regexp is None:
regexp = re.escape(name)
self.regexp = re.compile('(?<![^\W_])(' + regexp + ')(?![^\W_])', re.IGNORECASE)
def matches(self, text):
"""Test if quality matches to text.
:param string text: data te be tested against
:returns: tuple (matches, remaining text without quality data)
"""
match = self.regexp.search(text)
if not match:
return False, ""
else:
# remove matching part from the text
text = text[:match.start()] + text[match.end():]
return True, text
def __hash__(self):
return hash(self.type + str(self.value))
def __nonzero__(self):
return self.value
def __eq__(self, other):
if isinstance(other, basestring):
other = _registry.get(other)
if not isinstance(other, QualityComponent):
raise TypeError('Cannot compare %r and %r' % (self, other))
if other.type == self.type:
return self.value == other.value
else:
raise TypeError('Cannot compare %s and %s' % (self.type, other.type))
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, basestring):
other = _registry.get(other)
if not isinstance(other, QualityComponent):
raise TypeError('Cannot compare %r and %r' % (self, other))
if other.type == self.type:
return self.value < other.value
else:
raise TypeError('Cannot compare %s and %s' % (self.type, other.type))
def __ge__(self, other):
return not self.__lt__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __add__(self, other):
if not isinstance(other, int):
raise TypeError()
l = globals().get('_' + self.type + 's')
index = l.index(self) + other
if index >= len(l):
index = -1
return l[index]
def __sub__(self, other):
if not isinstance(other, int):
raise TypeError()
l = globals().get('_' + self.type + 's')
index = l.index(self) - other
if index < 0:
index = 0
return l[index]
def __repr__(self):
return '<%s(name=%s,value=%s)>' % (self.type.title(), self.name, self.value)
def __str__(self):
return self.name
def __deepcopy__(self, memo=None):
# No mutable attributes, return a regular copy
return copy.copy(self)
_resolutions = [
QualityComponent('resolution', 10, '360p'),
QualityComponent('resolution', 20, '368p', '368p?'),
QualityComponent('resolution', 30, '480p', '480p?'),
QualityComponent('resolution', 40, '576p', '576p?'),
QualityComponent('resolution', 45, 'hr'),
QualityComponent('resolution', 50, '720i'),
QualityComponent('resolution', 60, '720p', '(1280x)?720(p|hd)?x?(50)?'),
QualityComponent('resolution', 70, '1080i'),
QualityComponent('resolution', 80, '1080p', '(1920x)?1080p?')
]
_sources = [
QualityComponent('source', 10, 'workprint', modifier=-8),
QualityComponent('source', 20, 'cam', '(?:hd)?cam', modifier=-7),
QualityComponent('source', 30, 'ts', '(?:hd)?ts|telesync', modifier=-6),
QualityComponent('source', 40, 'tc', 'tc|telecine', modifier=-5),
QualityComponent('source', 50, 'r5', 'r[2-8c]', modifier=-4),
QualityComponent('source', 60, 'hdrip', 'hd[\W_]?rip', modifier=-3),
QualityComponent('source', 70, 'ppvrip', 'ppv[\W_]?rip', modifier=-2),
QualityComponent('source', 80, 'preair', modifier=-1),
QualityComponent('source', 90, 'tvrip', 'tv[\W_]?rip'),
QualityComponent('source', 100, 'dsr', 'dsr|ds[\W_]?rip'),
QualityComponent('source', 110, 'sdtv', '(?:[sp]dtv|dvb)(?:[\W_]?rip)?'),
QualityComponent('source', 120, 'webrip', 'web[\W_]?rip'),
QualityComponent('source', 130, 'dvdscr', '(?:(?:dvd|web)[\W_]?)?scr(?:eener)?', modifier=0),
QualityComponent('source', 140, 'bdscr', 'bdscr(?:eener)?'),
QualityComponent('source', 150, 'hdtv', 'a?hdtv(?:[\W_]?rip)?'),
QualityComponent('source', 160, 'webdl', 'web(?:[\W_]?(dl|hd))'),
QualityComponent('source', 170, 'dvdrip', 'dvd(?:[\W_]?rip)?'),
QualityComponent('source', 175, 'remux'),
QualityComponent('source', 180, 'bluray', '(?:b[dr][\W_]?rip|blu[\W_]?ray(?:[\W_]?rip)?)')
]
_codecs = [
QualityComponent('codec', 10, 'divx'),
QualityComponent('codec', 20, 'xvid'),
QualityComponent('codec', 30, 'h264', '[hx].?264'),
QualityComponent('codec', 40, 'h265', '[hx].?265|hevc'),
QualityComponent('codec', 50, '10bit', '10.?bit|hi10p')
]
channels = '(?:(?:[\W_]?5[\W_]?1)|(?:[\W_]?2[\W_]?(?:0|ch)))'
_audios = [
QualityComponent('audio', 10, 'mp3'),
# TODO: No idea what order these should go in or if we need different regexps
QualityComponent('audio', 20, 'aac', 'aac%s?' % channels),
QualityComponent('audio', 30, 'dd5.1', 'dd%s' % channels),
QualityComponent('audio', 40, 'ac3', 'ac3%s?' % channels),
QualityComponent('audio', 50, 'flac', 'flac%s?' % channels),
# The DTSs are a bit backwards, but the more specific one needs to be parsed first
QualityComponent('audio', 60, 'dtshd', 'dts[\W_]?hd(?:[\W_]?ma)?'),
QualityComponent('audio', 70, 'dts'),
QualityComponent('audio', 80, 'truehd')
]
_UNKNOWNS = {
'resolution': QualityComponent('resolution', 0, 'unknown'),
'source': QualityComponent('source', 0, 'unknown'),
'codec': QualityComponent('codec', 0, 'unknown'),
'audio': QualityComponent('audio', 0, 'unknown')
}
# For wiki generating help
'''for type in (_resolutions, _sources, _codecs, _audios):
print '{{{#!td style="vertical-align: top"'
for item in reversed(type):
print '- ' + item.name
print '}}}'
'''
_registry = {}
for items in (_resolutions, _sources, _codecs, _audios):
for item in items:
_registry[item.name] = item
def all_components():
return _registry.itervalues()
class Quality(object):
"""Parses and stores the quality of an entry in the four component categories."""
def __init__(self, text=''):
"""
:param text: A string to parse quality from
"""
self.text = text
self.clean_text = text
if text:
self.parse(text)
else:
self.resolution = _UNKNOWNS['resolution']
self.source = _UNKNOWNS['source']
self.codec = _UNKNOWNS['codec']
self.audio = _UNKNOWNS['audio']
def parse(self, text):
"""Parses a string to determine the quality in the four component categories.
:param text: The string to parse
"""
self.text = text
self.clean_text = text
self.resolution = self._find_best(_resolutions, _UNKNOWNS['resolution'], False)
self.source = self._find_best(_sources, _UNKNOWNS['source'])
self.codec = self._find_best(_codecs, _UNKNOWNS['codec'])
self.audio = self._find_best(_audios, _UNKNOWNS['audio'])
# If any of the matched components have defaults, set them now.
for component in self.components:
for default in component.defaults:
default = _registry[default]
if not getattr(self, default.type):
setattr(self, default.type, default)
def _find_best(self, qlist, default=None, strip_all=True):
"""Finds the highest matching quality component from `qlist`"""
result = None
search_in = self.clean_text
for item in qlist:
match = item.matches(search_in)
if match[0]:
result = item
self.clean_text = match[1]
if strip_all:
# In some cases we want to strip all found quality components,
# even though we're going to return only the last of them.
search_in = self.clean_text
if item.modifier is not None:
# If this item has a modifier, do not proceed to check higher qualities in the list
break
return result or default
@property
def name(self):
name = ' '.join(str(p) for p in (self.resolution, self.source, self.codec, self.audio) if p.value != 0)
return name or 'unknown'
@property
def components(self):
return [self.resolution, self.source, self.codec, self.audio]
@property
def _comparator(self):
modifier = sum(c.modifier for c in self.components if c.modifier)
return [modifier] + self.components
def __contains__(self, other):
if isinstance(other, basestring):
other = Quality(other)
if not other or not self:
return False
for cat in ('resolution', 'source', 'audio', 'codec'):
othercat = getattr(other, cat)
if othercat and othercat != getattr(self, cat):
return False
return True
def __nonzero__(self):
return any(self._comparator)
def __eq__(self, other):
if isinstance(other, basestring):
other = Quality(other)
if not other:
raise TypeError('`%s` does not appear to be a valid quality string.' % other.text)
if not isinstance(other, Quality):
if other is None:
return False
raise TypeError('Cannot compare %r and %r' % (self, other))
return self._comparator == other._comparator
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, basestring):
other = Quality(other)
if not other:
raise TypeError('`%s` does not appear to be a valid quality string.' % other.text)
if not isinstance(other, Quality):
raise TypeError('Cannot compare %r and %r' % (self, other))
return self._comparator < other._comparator
def __ge__(self, other):
return not self.__lt__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __repr__(self):
return '<Quality(resolution=%s,source=%s,codec=%s,audio=%s)>' % (self.resolution, self.source,
self.codec, self.audio)
def __str__(self):
return self.name
def __hash__(self):
# Make these usable as dict keys
return hash(self.name)
def get(quality_name):
"""Returns a quality object based on canonical quality name."""
found_components = {}
for part in quality_name.lower().split():
component = _registry.get(part)
if not component:
raise ValueError('`%s` is not a valid quality string' % part)
if component.type in found_components:
raise ValueError('`%s` cannot be defined twice in a quality' % component.type)
found_components[component.type] = component
if not found_components:
raise ValueError('No quality specified')
result = Quality()
for type, component in found_components.iteritems():
setattr(result, type, component)
return result
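def _quality_example():
    """Illustrative only: parsing a release name and the equivalent canonical form."""
    parsed = Quality('Some.Show.S01E01.720p.HDTV.x264-GRP')
    assert parsed.resolution.name == '720p'
    assert parsed.source.name == 'hdtv'
    assert parsed.codec.name == 'h264'
    assert parsed == get('720p hdtv h264')
    return parsed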
class RequirementComponent(object):
"""Represents requirements for a given component type. Can evaluate whether a given QualityComponent
meets those requirements."""
def __init__(self, type):
self.type = type
self.reset()
def reset(self):
self.min = None
self.max = None
self.acceptable = []
self.none_of = []
def allows(self, comp, loose=False):
if comp.type != self.type:
raise TypeError('Cannot compare %r against %s' % (comp, self.type))
if comp in self.none_of:
return False
if loose:
return True
if comp in self.acceptable:
return True
if self.min or self.max:
if self.min and comp < self.min:
return False
if self.max and comp > self.max:
return False
return True
if not self.acceptable:
return True
return False
def add_requirement(self, text):
if '-' in text:
min, max = text.split('-')
min, max = _registry[min], _registry[max]
if min.type != max.type != self.type:
raise ValueError('Component type mismatch: %s' % text)
self.min, self.max = min, max
elif '|' in text:
quals = text.split('|')
quals = [_registry[qual] for qual in quals]
if any(qual.type != self.type for qual in quals):
raise ValueError('Component type mismatch: %s' % text)
self.acceptable.extend(quals)
else:
qual = _registry[text.strip('!<>=+')]
if qual.type != self.type:
raise ValueError('Component type mismatch!')
if text in _registry:
self.acceptable.append(qual)
else:
if text[0] == '<':
if text[1] != '=':
qual -= 1
self.max = qual
elif text[0] == '>' or text.endswith('+'):
if text[1] != '=' and not text.endswith('+'):
qual += 1
self.min = qual
elif text[0] == '!':
self.none_of.append(qual)
class Requirements(object):
"""Represents requirements for allowable qualities. Can determine whether a given Quality passes requirements."""
def __init__(self, req=''):
self.text = ''
self.resolution = RequirementComponent('resolution')
self.source = RequirementComponent('source')
self.codec = RequirementComponent('codec')
self.audio = RequirementComponent('audio')
if req:
self.parse_requirements(req)
@property
def components(self):
return [self.resolution, self.source, self.codec, self.audio]
def parse_requirements(self, text):
"""
Parses a requirements string.
:param text: The string containing quality requirements.
"""
text = text.lower()
if self.text:
self.text += ' '
self.text += text
if self.text == 'any':
for component in self.components:
component.reset()
return
text = text.replace(',', ' ')
parts = text.split()
try:
for part in parts:
if '-' in part:
found = _registry[part.split('-')[0]]
elif '|' in part:
found = _registry[part.split('|')[0]]
else:
found = _registry[part.strip('!<>=+')]
for component in self.components:
if found.type == component.type:
component.add_requirement(part)
except KeyError as e:
raise ValueError('%s is not a valid quality component.' % e.args[0])
def allows(self, qual, loose=False):
"""Determine whether this set of requirements allows a given quality.
:param Quality qual: The quality to evaluate.
:param bool loose: If True, only ! (not) requirements will be enforced.
:rtype: bool
:returns: True if given quality passes all component requirements.
"""
if isinstance(qual, basestring):
qual = Quality(qual)
if not qual:
raise TypeError('`%s` does not appear to be a valid quality string.' % qual.text)
for r_component, q_component in zip(self.components, qual.components):
if not r_component.allows(q_component, loose=loose):
return False
return True
def __str__(self):
return self.text or 'any'
def __repr__(self):
return '<Requirements(%s)>' % self
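def _requirements_example():
    """Illustrative only: a requirements string and how it filters qualities."""
    req = Requirements('720p-1080p h264|h265 !cam !ts')
    assert req.allows('1080p bluray h264')
    assert not req.allows('720p cam h264')
    return req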
| mit | -2,332,596,124,858,536,400 | 35.931915 | 117 | 0.563314 | false |
teltek/edx-platform | common/djangoapps/enrollment/api.py | 2 | 18093 | """
Enrollment API for creating, updating, and deleting enrollments. Also provides access to enrollment information at a
course level, such as available course modes.
"""
import importlib
import logging
from django.conf import settings
from django.core.cache import cache
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from enrollment import errors
log = logging.getLogger(__name__)
DEFAULT_DATA_API = 'enrollment.data'
def get_enrollments(user_id, include_inactive=False):
"""Retrieves all the courses a user is enrolled in.
Takes a user and retrieves all relative enrollments. Includes information regarding how the user is enrolled
in the the course.
Args:
user_id (str): The username of the user we want to retrieve course enrollment information for.
include_inactive (bool): Determines whether inactive enrollments will be included
Returns:
A list of enrollment information for the given user.
Examples:
>>> get_enrollments("Bob")
[
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
},
{
"created": "2014-10-25T20:18:00Z",
"mode": "verified",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/edX-Insider/2014T2",
"course_name": "edX Insider Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": True
}
}
]
"""
return _data_api().get_course_enrollments(user_id, include_inactive)
def get_enrollment(user_id, course_id):
"""Retrieves all enrollment information for the user in respect to a specific course.
Gets all the course enrollment information specific to a user in a course.
Args:
user_id (str): The user to get course enrollment information for.
course_id (str): The course to get enrollment information for.
Returns:
A serializable dictionary of the course enrollment.
Example:
>>> get_enrollment("Bob", "edX/DemoX/2014T2")
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
}
"""
return _data_api().get_course_enrollment(user_id, course_id)
def add_enrollment(user_id, course_id, mode=None, is_active=True, enrollment_attributes=None):
"""Enrolls a user in a course.
Enrolls a user in a course. If the mode is not specified, this will default to `CourseMode.DEFAULT_MODE_SLUG`.
Arguments:
user_id (str): The user to enroll.
course_id (str): The course to enroll the user in.
mode (str): Optional argument for the type of enrollment to create. Ex. 'audit', 'honor', 'verified',
'professional'. If not specified, this defaults to the default course mode.
is_active (boolean): Optional argument for making the new enrollment inactive. If not specified, is_active
defaults to True.
enrollment_attributes (list): Attributes to be set the enrollment.
Returns:
A serializable dictionary of the new course enrollment.
Example:
>>> add_enrollment("Bob", "edX/DemoX/2014T2", mode="audit")
{
"created": "2014-10-20T20:18:00Z",
"mode": "audit",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "audit",
"name": "Audit",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
}
"""
if mode is None:
mode = _default_course_mode(course_id)
validate_course_mode(course_id, mode, is_active=is_active)
enrollment = _data_api().create_course_enrollment(user_id, course_id, mode, is_active)
if enrollment_attributes is not None:
set_enrollment_attributes(user_id, course_id, enrollment_attributes)
return enrollment
def update_enrollment(user_id, course_id, mode=None, is_active=None, enrollment_attributes=None, include_expired=False):
"""Updates the course mode for the enrolled user.
Update a course enrollment for the given user and course.
Arguments:
user_id (str): The user associated with the updated enrollment.
course_id (str): The course associated with the updated enrollment.
Keyword Arguments:
mode (str): The new course mode for this enrollment.
is_active (bool): Sets whether the enrollment is active or not.
enrollment_attributes (list): Attributes to be set the enrollment.
include_expired (bool): Boolean denoting whether expired course modes should be included.
Returns:
A serializable dictionary representing the updated enrollment.
Example:
>>> update_enrollment("Bob", "edX/DemoX/2014T2", "honor")
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
}
"""
log.info(u'Starting Update Enrollment process for user {user} in course {course} to mode {mode}'.format(
user=user_id,
course=course_id,
mode=mode,
))
if mode is not None:
validate_course_mode(course_id, mode, is_active=is_active, include_expired=include_expired)
enrollment = _data_api().update_course_enrollment(user_id, course_id, mode=mode, is_active=is_active)
if enrollment is None:
msg = u"Course Enrollment not found for user {user} in course {course}".format(user=user_id, course=course_id)
log.warn(msg)
raise errors.EnrollmentNotFoundError(msg)
else:
if enrollment_attributes is not None:
set_enrollment_attributes(user_id, course_id, enrollment_attributes)
log.info(u'Course Enrollment updated for user {user} in course {course} to mode {mode}'.format(
user=user_id,
course=course_id,
mode=mode
))
return enrollment
def get_course_enrollment_details(course_id, include_expired=False):
"""Get the course modes for course. Also get enrollment start and end date, invite only, etc.
Given a course_id, return a serializable dictionary of properties describing course enrollment information.
Args:
course_id (str): The Course to get enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary of course enrollment information.
Example:
>>> get_course_enrollment_details("edX/DemoX/2014T2")
{
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
"""
cache_key = u'enrollment.course.details.{course_id}.{include_expired}'.format(
course_id=course_id,
include_expired=include_expired
)
cached_enrollment_data = None
try:
cached_enrollment_data = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception (for example, memcache keys that contain spaces)
log.exception(u"Error occurred while retrieving course enrollment details from the cache")
if cached_enrollment_data:
log.info(u"Get enrollment data for course %s (cached)", course_id)
return cached_enrollment_data
course_enrollment_details = _data_api().get_course_enrollment_info(course_id, include_expired)
try:
cache_time_out = getattr(settings, 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
cache.set(cache_key, course_enrollment_details, cache_time_out)
except Exception:
# Catch any unexpected errors during caching.
log.exception(u"Error occurred while caching course enrollment details for course %s", course_id)
raise errors.CourseEnrollmentError(u"An unexpected error occurred while retrieving course enrollment details.")
log.info(u"Get enrollment data for course %s", course_id)
return course_enrollment_details
def set_enrollment_attributes(user_id, course_id, attributes):
"""Set enrollment attributes for the enrollment of given user in the
course provided.
Args:
course_id (str): The Course to set enrollment attributes for.
user_id (str): The User to set enrollment attributes for.
attributes (list): Attributes to be set.
Example:
>>>set_enrollment_attributes(
"Bob",
"course-v1-edX-DemoX-1T2015",
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
)
"""
_data_api().add_or_update_enrollment_attr(user_id, course_id, attributes)
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attributes for given user for provided course.
Args:
user_id: The User to get enrollment attributes for
course_id (str): The Course to get enrollment attributes for.
Example:
>>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015")
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
Returns: list
"""
return _data_api().get_enrollment_attributes(user_id, course_id)
def _default_course_mode(course_id):
"""Return the default enrollment for a course.
Special case the default enrollment to return if nothing else is found.
Arguments:
course_id (str): The course to check against for available course modes.
Returns:
str
"""
course_modes = CourseMode.modes_for_course(CourseKey.from_string(course_id))
available_modes = [m.slug for m in course_modes]
if CourseMode.DEFAULT_MODE_SLUG in available_modes:
return CourseMode.DEFAULT_MODE_SLUG
elif 'audit' in available_modes:
return 'audit'
elif 'honor' in available_modes:
return 'honor'
return CourseMode.DEFAULT_MODE_SLUG
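# Illustrative fallback: with course modes {"verified", "audit"} the helper above
# returns "audit"; with only {"professional"} it falls back to
# CourseMode.DEFAULT_MODE_SLUG.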
def validate_course_mode(course_id, mode, is_active=None, include_expired=False):
"""Checks to see if the specified course mode is valid for the course.
If the requested course mode is not available for the course, raise an error with corresponding
course enrollment information.
Arguments:
course_id (str): The course to check against for available course modes.
mode (str): The slug for the course mode specified in the enrollment.
Keyword Arguments:
is_active (bool): Whether the enrollment is to be activated or deactivated.
include_expired (bool): Boolean denoting whether expired course modes should be included.
Returns:
None
Raises:
CourseModeNotFound: raised if the course mode is not found.
"""
# If the client has requested an enrollment deactivation, we want to include expired modes
# in the set of available modes. This allows us to unenroll users from expired modes.
# If include_expired is set as True we should not redetermine its value.
if not include_expired:
include_expired = not is_active if is_active is not None else False
course_enrollment_info = _data_api().get_course_enrollment_info(course_id, include_expired=include_expired)
course_modes = course_enrollment_info["course_modes"]
available_modes = [m['slug'] for m in course_modes]
if mode not in available_modes:
msg = (
u"Specified course mode '{mode}' unavailable for course {course_id}. "
u"Available modes were: {available}"
).format(
mode=mode,
course_id=course_id,
available=", ".join(available_modes)
)
log.warn(msg)
raise errors.CourseModeNotFoundError(msg, course_enrollment_info)
def unenroll_user_from_all_courses(user_id):
"""
Unenrolls a specified user from all of the courses they are currently enrolled in.
:param user_id: The id of the user being unenrolled.
:return: The IDs of all of the organizations from which the learner was unenrolled.
"""
return _data_api().unenroll_user_from_all_courses(user_id)
def _data_api():
"""Returns a Data API.
This relies on Django settings to find the appropriate data API.
"""
# We retrieve the settings in-line here (rather than using the
# top-level constant), so that @override_settings will work
# in the test suite.
api_path = getattr(settings, "ENROLLMENT_DATA_API", DEFAULT_DATA_API)
try:
return importlib.import_module(api_path)
except (ImportError, ValueError):
log.exception(u"Could not load module at '{path}'".format(path=api_path))
raise errors.EnrollmentApiLoadError(api_path)
| agpl-3.0 | 405,106,475,702,398,660 | 36.459627 | 120 | 0.55773 | false |
mdhaman/superdesk-core | apps/marked_desks/service.py | 3 | 3511 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
from flask import current_app as app
from superdesk import get_resource_service
from superdesk.services import BaseService
from eve.utils import ParsedRequest
from superdesk.notification import push_notification
from apps.archive.common import get_user
from eve.utils import config
from superdesk.utc import utcnow
from apps.archive.common import ITEM_MARK, ITEM_UNMARK
def get_marked_items(desk_id):
"""Get items marked for given desk"""
query = {
'query': {'filtered': {'filter': {'term': {'marked_desks.desk_id': str(desk_id)}}}},
'sort': [{'versioncreated': 'desc'}],
'size': 200
}
request = ParsedRequest()
request.args = {'source': json.dumps(query), 'repo': 'archive,published'}
return list(get_resource_service('search').get(req=request, lookup=None))
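# A minimal usage sketch added for illustration; it is not part of the original
# module. The desk id is hypothetical and the 'guid' key is assumed from the
# archive item schema.
def _example_marked_item_guids(desk_id="desk-id-123"):
    """Illustrative only: collect the guids of items currently marked for a desk."""
    return [item.get('guid') for item in get_marked_items(desk_id)]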
class MarkedForDesksService(BaseService):
def create(self, docs, **kwargs):
"""Toggle marked desk status for given desk and item."""
service = get_resource_service('archive')
published_service = get_resource_service('published')
ids = []
for doc in docs:
item = service.find_one(req=None, guid=doc['marked_item'])
if not item:
ids.append(None)
continue
ids.append(item['_id'])
marked_desks = item.get('marked_desks', [])
if not marked_desks:
marked_desks = []
existing_mark = next((m for m in marked_desks if m['desk_id'] == doc['marked_desk']), None)
if existing_mark:
# there is an existing mark so this is un-mark action
marked_desks = [m for m in marked_desks if m['desk_id'] != doc['marked_desk']]
                marked_desks_on = False # desk mark toggled off
else:
# there is no existing mark so this is mark action
user = get_user() or {}
new_mark = {}
new_mark['desk_id'] = doc['marked_desk']
new_mark['user_marked'] = str(user.get(config.ID_FIELD, ''))
new_mark['date_marked'] = utcnow()
marked_desks.append(new_mark)
marked_desks_on = True
updates = {'marked_desks': marked_desks}
service.system_update(item['_id'], updates, item)
publishedItems = published_service.find({'item_id': item['_id']})
for publishedItem in publishedItems:
if publishedItem['_current_version'] == item['_current_version'] or not marked_desks_on:
updates = {'marked_desks': marked_desks}
published_service.system_update(publishedItem['_id'], updates, publishedItem)
push_notification(
'item:marked_desks',
marked=int(marked_desks_on),
item_id=item['_id'],
mark_id=str(doc['marked_desk']))
if marked_desks_on:
app.on_archive_item_updated({'desk_id': doc['marked_desk']}, item, ITEM_MARK)
else:
app.on_archive_item_updated({'desk_id': doc['marked_desk']}, item, ITEM_UNMARK)
return ids
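# A minimal usage sketch added for illustration; it is not part of the original
# service. Both identifiers are hypothetical. Posting the same pair twice toggles
# the mark off again, mirroring the existing_mark branch in create() above.
def _example_toggle_payload(item_guid="urn:newsml:example:1", desk_id="desk-id-123"):
    """Illustrative only: build one doc in the shape consumed by create()."""
    return [{"marked_item": item_guid, "marked_desk": desk_id}]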
| agpl-3.0 | 3,916,429,104,178,711,000 | 38.897727 | 104 | 0.589576 | false |
BondAnthony/ansible | hacking/tests/gen_distribution_version_testcase.py | 13 | 2703 | #!/usr/bin/env python
"""
This script generates test cases for test_distribution_version.py.
To do so it outputs the relevant files from /etc/*release, the output of distro.linux_distribution()
and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os.path
import platform
import subprocess
import sys
from ansible.module_utils import distro
from ansible.module_utils._text import to_text
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
'/etc/os-release',
'/etc/lsb-release',
'/etc/altlinux-release',
'/etc/os-release',
'/etc/coreos/update.conf',
'/etc/flatcar/update.conf',
'/usr/lib/os-release',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
dist = distro.linux_distribution(full_distribution_name=False)
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
b_ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
ansible_out = to_text(b_ansible_out)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
except Exception:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'distro': {
'codename': distro.codename(),
'id': distro.id(),
'name': distro.name(),
'version': distro.version(),
'version_best': distro.version(best=True),
'lsb_release_info': distro.lsb_release_info(),
'os_release_info': distro.os_release_info(),
},
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
system = platform.system()
if system != 'Linux':
output['platform.system'] = system
release = platform.release()
if release:
output['platform.release'] = release
print(json.dumps(output, indent=4))
| gpl-3.0 | 7,522,184,532,667,212,000 | 25.242718 | 115 | 0.645209 | false |
prasen-ftech/pywinauto | examples/windowmediaplayer.py | 19 | 2581 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Some automation of Windows Media player"
__revision__ = "$Revision$"
#import os
import time
import sys
try:
from pywinauto import application
except ImportError:
import os.path
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
import sys
sys.path.append(pywinauto_path)
from pywinauto import application
def WindowsMedia():
app = application.Application()
try:
app.start_( # connect_(path =
ur"C:\Program Files\Windows Media Player\wmplayer.exe")
except application.ProcessNotFoundError:
print "You must first start Windows Media "\
"Player before running this script"
sys.exit()
app.WindowsMediaPlayer.MenuSelect("View->GoTo->Library")
app.WindowsMediaPlayer.MenuSelect("View->Choose Columns")
#for ctrl in app.ChooseColumns.Children():
# print ctrl.Class()
print "Is it checked already:", app.ChooseColumsn.ListView.IsChecked(1)
# Check an Item in the listview
app.ChooseColumns.ListView.Check(1)
time.sleep(.5)
print "Shold be checked now:", app.ChooseColumsn.ListView.IsChecked(1)
# Uncheck it
app.ChooseColumns.ListView.UnCheck(1)
time.sleep(.5)
print "Should not be checked now:", app.ChooseColumsn.ListView.IsChecked(1)
# Check it again
app.ChooseColumns.ListView.Check(1)
time.sleep(.5)
    app.ChooseColumns.Cancel.Click()
app.WindowsMediaPlayer.MenuSelect("File->Exit")
def Main():
start = time.time()
WindowsMedia()
print "Total time taken:", time.time() - start
if __name__ == "__main__":
Main() | lgpl-2.1 | 2,975,419,718,908,480,500 | 27.022472 | 79 | 0.676095 | false |
rturumella/CloudBot | plugins/reddit.py | 2 | 3282 | from datetime import datetime
import re
import random
import asyncio
import functools
import urllib.parse
import requests
from cloudbot import hook
from cloudbot.util import timeformat, formatting
reddit_re = re.compile(r'.*(((www\.)?reddit\.com/r|redd\.it)[^ ]+)', re.I)
base_url = "http://reddit.com/r/{}/.json"
short_url = "http://redd.it/{}"
def format_output(item, show_url=False):
""" takes a reddit post and returns a formatted sting """
item["title"] = formatting.truncate(item["title"], 50)
item["link"] = short_url.format(item["id"])
raw_time = datetime.fromtimestamp(int(item["created_utc"]))
item["timesince"] = timeformat.timesince(raw_time, count=1)
item["comments"] = formatting.pluralize(item["num_comments"], 'comment')
item["points"] = formatting.pluralize(item["score"], 'point')
if item["over_18"]:
item["warning"] = " \x02NSFW\x02"
else:
item["warning"] = ""
if show_url:
return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago - {comments}, {points} -" \
" {link}{warning}".format(**item)
else:
return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago - {comments}, {points}{warning}".format(**item)
@hook.regex(reddit_re)
def reddit_url(match, bot):
url = match.group(1)
if "redd.it" in url:
url = "http://" + url
response = requests.get(url)
url = response.url + "/.json"
if not urllib.parse.urlparse(url).scheme:
url = "http://" + url + "/.json"
# the reddit API gets grumpy if we don't include headers
headers = {'User-Agent': bot.user_agent}
r = requests.get(url, headers=headers)
data = r.json()
item = data[0]["data"]["children"][0]["data"]
return format_output(item)
@asyncio.coroutine
@hook.command(autohelp=False)
def reddit(text, bot, loop):
"""<subreddit> [n] - gets a random post from <subreddit>, or gets the [n]th post in the subreddit"""
id_num = None
headers = {'User-Agent': bot.user_agent}
if text:
# clean and split the input
parts = text.lower().strip().split()
# find the requested post number (if any)
if len(parts) > 1:
url = base_url.format(parts[0].strip())
try:
id_num = int(parts[1]) - 1
except ValueError:
return "Invalid post number."
else:
url = base_url.format(parts[0].strip())
else:
url = "http://reddit.com/.json"
try:
# Again, identify with Reddit using an User Agent, otherwise get a 429
inquiry = yield from loop.run_in_executor(None, functools.partial(requests.get, url, headers=headers))
data = inquiry.json()
except Exception as e:
return "Error: " + str(e)
data = data["data"]["children"]
# get the requested/random post
if id_num is not None:
try:
item = data[id_num]["data"]
except IndexError:
length = len(data)
return "Invalid post number. Number must be between 1 and {}.".format(length)
else:
item = random.choice(data)["data"]
return format_output(item, show_url=True)
| gpl-3.0 | 2,631,840,621,863,877,600 | 30.257143 | 110 | 0.594455 | false |
f2nd/yandex-tank | yandextank/stepper/tests/test_load_plan.py | 4 | 6624 | import pytest
from yandextank.stepper.load_plan import create, Const, Line, Composite, Stairway, StepFactory
from yandextank.stepper.util import take
class TestLine(object):
def test_get_rps_list(self):
lp = create(["line(1, 100, 10s)"])
rps_list = lp.get_rps_list()
assert len(rps_list) == 11
assert rps_list[-1][0] == 100
@pytest.mark.parametrize(
"rps, duration, rps_list",
[(100, 3000, [(100, 3)]), (0, 3000, [(0, 3)]), (100, 0, [(100, 0)])])
class TestConst(object):
@pytest.mark.parametrize(
"check_point, expected",
[(lambda duration: 0, lambda rps: rps),
(lambda duration: duration / 2, lambda rps: rps),
(lambda duration: duration + 1, lambda rps: 0),
(lambda duration: -1, lambda rps: 0)])
def test_rps_at(self, rps, duration, rps_list, check_point, expected):
assert Const(rps,
duration).rps_at(check_point(duration)) == expected(rps)
def test_get_rps_list(self, rps, duration, rps_list):
assert Const(rps, duration).get_rps_list() == rps_list
assert isinstance(rps_list[0][1], int)
class TestLineNew(object):
@pytest.mark.parametrize(
"min_rps, max_rps, duration, check_point, expected",
[(0, 10, 30 * 1000, 0, 0), (0, 10, 30 * 1000, 10, 3),
(0, 10, 30 * 1000, 29, 10), (9, 10, 30 * 1000, 1, 9),
(9, 10, 30 * 1000, 20, 10)])
def test_rps_at(self, min_rps, max_rps, duration, check_point, expected):
assert round(Line(min_rps, max_rps, duration).rps_at(
check_point)) == expected
@pytest.mark.parametrize(
"min_rps, max_rps, duration, check_point, expected",
[
(0, 10, 20 * 1000, 9, (9, 2)),
(0, 10, 30 * 1000, 0, (0, 2)),
(0, 10, 30 * 1000, 5, (5, 3)),
(0, 10, 30 * 1000, 10, (10, 2)),
(0, 10, 3 * 1000, 0, (0, 1)),
(0, 10, 3 * 1000, 1, (3, 1)),
(0, 10, 3 * 1000, 2, (7, 1)),
(0, 10, 3 * 1000, 3, (10, 1)),
(9, 10, 30 * 1000, 0, (9, 15)),
(9, 10, 30 * 1000, 1, (10, 16)),
(10, 10, 30 * 1000, 0, (10, 31)), # strange
(10, 0, 30 * 1000, 0, (10, 2)),
(10, 0, 30 * 1000, 1, (9, 3)),
(10, 0, 30 * 1000, 9, (1, 3)),
(10, 0, 30 * 1000, 10, (0, 2)),
])
def test_get_rps_list(
self, min_rps, max_rps, duration, check_point, expected):
assert Line(min_rps, max_rps,
duration).get_rps_list()[check_point] == expected
@pytest.mark.parametrize(
"min_rps, max_rps, duration, expected_len, threshold, len_above_threshold",
[
(2, 12, 25000, 175, 5000, 160),
(2, 12, 25000, 175, 10000, 135),
(2, 12, 25000, 175, 15000, 100),
(2, 12, 25000, 175, 20000, 55),
(0, 10, 25000, 125, 15000, 80),
(10, 12, 20000, 220, 10000, 115),
(10, 10, 20000, 200, 10000, 100),
(10, 0, 25000, 125, 10000, 45),
(10, 0, 25000, 125, 15000, 20),
])
def test_iter(
self, min_rps, max_rps, duration, expected_len, threshold,
len_above_threshold):
load_plan = Line(min_rps, max_rps, duration)
assert len(load_plan) == expected_len
assert len(
[ts for ts in load_plan if ts >= threshold]) == len_above_threshold
class TestComposite(object):
@pytest.mark.parametrize(
"steps, expected_len", [([Line(0, 10, 20000), Const(10, 10000)], 200),
([Line(0, 10, 20000), Line(10, 0, 20000)], 200),
([Const(5, 10000), Const(10, 5000)], 100)])
def test_iter(self, steps, expected_len):
assert len(Composite(steps)) == expected_len
@pytest.mark.parametrize(
"steps, check_point, expected", [
([Line(0, 10, 20000), Const(10, 10000)], 9, (9, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 10, (10, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 11, (10, 10)),
])
def test_rps_list(self, steps, check_point, expected):
assert Composite(steps).get_rps_list()[check_point] == expected
class TestStairway(object):
@pytest.mark.parametrize(
"min_rps, max_rps, increment, step_duration, expected_len, threshold, len_above_threshold",
[(0, 1000, 50, 3000, 31500, 9000, 31050),
(0, 1000, 50, 3000, 31500, 15000, 30000),
(0, 1000, 50, 3000, 31500, 45000, 15750)])
def test_iter(
self, min_rps, max_rps, increment, step_duration, expected_len,
threshold, len_above_threshold):
load_plan = Stairway(min_rps, max_rps, increment, step_duration)
assert len(load_plan) == expected_len
assert len(
[ts for ts in load_plan if ts >= threshold]) == len_above_threshold
class TestCreate(object):
@pytest.mark.parametrize(
'rps_schedule, check_point, expected', [
(['line(1, 5, 2s)'], 100, [0, 618, 1000, 1302, 1561, 1791]),
(['line(1.1, 5.8, 2s)'], 100, [0, 566, 917, 1196, 1435, 1647]),
(['line(5, 1, 2s)'], 100, [0, 208, 438, 697, 1000, 1381]),
(['const(1, 10s)'], 100,
[0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]),
(['const(200, 0.1s)'], 100, [
0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75,
80, 85, 90, 95
]),
(['const(1, 2s)', 'const(2, 2s)'], 100,
[0, 1000, 2000, 2500, 3000, 3500]),
(['const(1.5, 10s)'], 100, [
0, 666, 1333, 2000, 2666, 3333, 4000, 4666, 5333, 6000, 6666,
7333, 8000, 8666, 9333
]),
(['step(1, 5, 1, 5s)'], 10,
[0, 1000, 2000, 3000, 4000, 5000, 5500, 6000, 6500, 7000]),
(['step(1.2, 5.7, 1.1, 5s)'], 10,
[0, 833, 1666, 2500, 3333, 4166, 5000, 5434, 5869, 6304]),
(['const(1, 1)'], 10, [0]),
])
def test_create(self, rps_schedule, check_point, expected):
# pytest.set_trace()
assert take(check_point, (create(rps_schedule))) == expected
# ([0-9.]+d)?([0-9.]+h)?([0-9.]+m)?([0-9.]+s)?
@pytest.mark.parametrize('step_config, expected_duration', [
('line(1,500,1m30s)', 90),
('const(50,1h30s)', 3630 * 1000),
('step(10,200,10,1h20m)', 4800 * 1000)
])
def test_step_factory(step_config, expected_duration):
steps = StepFactory.produce(step_config)
assert steps.duration == expected_duration
| lgpl-2.1 | 525,112,394,669,774,100 | 40.660377 | 99 | 0.513738 | false |
sjohns09/MSRDM | vendor/googletest/googlemock/scripts/fuse_gmock_files.py | 242 | 8631 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to [email protected]. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
"""Returns the root directory of Google Test."""
return os.path.join(gmock_root, '../googletest')
def ValidateGMockRootDir(gmock_root):
"""Makes sure gmock_root points to a valid gmock root directory.
The function aborts the program on failure.
"""
gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gmock headers we've processed.
def ProcessFile(gmock_header_path):
"""Processes the given gmock header file."""
# We don't process the same header twice.
if gmock_header_path in processed_files:
return
processed_files.add(gmock_header_path)
# Reads each line in the given gmock header.
for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'. We translate it to
# "gtest/gtest.h", regardless of what foo is, since all
# gtest headers are fused into gtest/gtest.h.
# There is no need to #include gtest.h twice.
if not gtest.GTEST_H_SEED in processed_files:
processed_files.add(gtest.GTEST_H_SEED)
output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_H_SEED)
output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
"""Scans folder gmock_root to fuse gmock-all.cc into output_file."""
processed_files = sets.Set()
def ProcessFile(gmock_source_file):
"""Processes the given gmock source file."""
# We don't process the same #included file twice.
if gmock_source_file in processed_files:
return
processed_files.add(gmock_source_file)
# Reads each line in the given gmock source file.
for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/foo.h"'. We treat it as '#include
# "gmock/gmock.h"', as all other gmock headers are being fused
# into gmock.h and cannot be #included directly.
# There is no need to #include "gmock/gmock.h" more than once.
if not GMOCK_H_SEED in processed_files:
processed_files.add(GMOCK_H_SEED)
output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."'.
# There is no need to #include gtest.h as it has been
# #included by gtest-all.cc.
pass
else:
m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
# First, fuse gtest-all.cc into gmock-gtest-all.cc.
gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
# Next, append fused gmock-all.cc to gmock-gtest-all.cc.
FuseGMockAllCcToFile(gmock_root, output_file)
output_file.close()
def FuseGMock(gmock_root, output_dir):
"""Fuses gtest.h, gmock.h, and gmock-gtest-all.h."""
ValidateGMockRootDir(gmock_root)
ValidateOutputDir(output_dir)
gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
FuseGMockH(gmock_root, output_dir)
FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gmock_files.py OUTPUT_DIR
FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
FuseGMock(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| lgpl-3.0 | -7,221,021,279,349,034,000 | 34.9625 | 79 | 0.695516 | false |
nathanial/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/tests/geogapp/tests.py | 222 | 4080 | """
Tests for geography support in PostGIS 1.5+
"""
import os
from django.contrib.gis import gdal
from django.contrib.gis.measure import D
from django.test import TestCase
from models import City, County, Zipcode
class GeographyTest(TestCase):
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is not allowed on geometry fields.
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.distance(htown.point)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
if not gdal.HAS_GDAL: return
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name' : 'Name',
'state' : 'State',
'mpoly' : 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
from django.contrib.gis.measure import A
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
| gpl-3.0 | 8,008,189,737,143,786,000 | 45.896552 | 105 | 0.628922 | false |
cloud-engineering/wifi | db.py | 1 | 2100 | # Python Standard Library Imports
import logging
# External Imports
from sqlalchemy import Column
from sqlalchemy import String, INTEGER, FLOAT
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
# Custom Imports
import config
import json
engine = create_engine('sqlite:///' + config.db.PATH, echo=config.db.DEBUG)
DeclarativeBase = declarative_base(engine)
Session = scoped_session(sessionmaker(engine))
class Event(DeclarativeBase):
__tablename__ = 'events'
id = Column(INTEGER, primary_key=True)
device_serial = Column(String)
mode = Column(INTEGER)
time_stamp = Column(INTEGER)
phaseA = Column(FLOAT)
phaseB = Column(FLOAT)
phaseC = Column(FLOAT)
voltage = Column(FLOAT)
def __init__(self, device_serial, mode, time_stamp, phaseA, phaseB, phaseC, voltage):
self.device_serial = device_serial
self.mode = mode
self.time_stamp = time_stamp
self.phaseA = phaseA
self.phaseB = phaseB
self.phaseC = phaseC
self.voltage = voltage
'''
    After spending more than 2 days on data formatting, it turns out Python surprised me again with its simplicity.
    The __repr__ function can serialise a standard Python dictionary I created into a JSON object and return it.
    I can now receive serialised JSON each time I query against the database - thanks to SQLAlchemy - see the implementation below.
'''
def __repr__(self):
return json.dumps({
"DeviceSerial" : self.device_serial,
"Mode" : self.mode,
"Events" : [{"TimeStamp":self.time_stamp,"PhaseA":self.phaseA,"PhaseB":self.phaseB,"PhaseC":self.phaseC,"Voltage":self.voltage}]
})
#'(%s,%d, %d, %f, %f, %f, %f0' % (self.device_serial, self.mode, self.time_stamp, self.phase_1, self.phase_2, self.phase_3, self.voltage)
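# A minimal usage sketch added for illustration; it is not part of the original
# module. All reading values below are made up.
def _example_store_event():
    session = Session()
    event = Event("SERIAL-0001", 1, 1431000000, 1.5, 1.6, 1.4, 229.9)
    session.add(event)
    session.commit()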
def initialise():
logging.info('Initialising the database.')
DeclarativeBase.metadata.create_all()
Session.execute('PRAGMA journal_mode = WAL') | mit | -8,845,268,197,298,115,000 | 36.517857 | 144 | 0.681905 | false |
yanikou19/pymatgen | pymatgen/io/aseio.py | 3 | 1743 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Structure
try:
from ase import Atoms
ase_loaded = True
except ImportError:
ase_loaded = False
class AseAtomsAdaptor(object):
"""
Adaptor serves as a bridge between ASE Atoms and pymatgen structure.
"""
@staticmethod
def get_atoms(structure):
"""
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
Returns:
ASE Atoms object
"""
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell)
@staticmethod
def get_structure(atoms):
"""
Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
Returns:
Equivalent pymatgen.core.structure.Structure
"""
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
return Structure(lattice, symbols, positions,
coords_are_cartesian=True)
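# A minimal usage sketch added for illustration; it is not part of the original
# module. The structure argument is assumed to be an ordered pymatgen Structure
# built elsewhere.
def _example_round_trip(structure):
    """Illustrative only: convert to ASE Atoms and back to a pymatgen Structure."""
    atoms = AseAtomsAdaptor.get_atoms(structure)
    return AseAtomsAdaptor.get_structure(atoms)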
| mit | 4,566,351,220,466,697,700 | 25.815385 | 79 | 0.63798 | false |
uchuugaka/anaconda | anaconda_lib/jedi/parser/__init__.py | 38 | 16804 | """
The ``Parser`` tries to convert the available Python code in an easy to read
format, something like an abstract syntax tree. The classes who represent this
tree, are sitting in the :mod:`jedi.parser.tree` module.
The Python module ``tokenize`` is a very important part in the ``Parser``,
because it splits the code into different words (tokens). Sometimes it looks a
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
module for this? Well, ``ast`` does a very good job understanding proper Python
code, but fails to work as soon as there's a single line of broken code.
There's one important optimization that needs to be known: Statements are not
being parsed completely. ``Statement`` is just a representation of the tokens
within the statement. This lowers memory usage and cpu time and reduces the
complexity of the ``Parser`` (there's another parser sitting inside
``Statement``, which produces ``Array`` and ``Call``).
"""
import os
import re
from jedi.parser import tree as pt
from jedi.parser import tokenize
from jedi.parser import token
from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
STRING, OP, ERRORTOKEN)
from jedi.parser.pgen2.pgen import generate_grammar
from jedi.parser.pgen2.parse import PgenParser
OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
# Not used yet. In the future I intend to add something like KeywordStatement
STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
'return', 'yield', 'pass', 'continue', 'break'
_loaded_grammars = {}
def load_grammar(file='grammar3.4'):
# For now we only support two different Python syntax versions: The latest
# Python 3 and Python 2. This may change.
if file.startswith('grammar3'):
file = 'grammar3.4'
else:
file = 'grammar2.7'
global _loaded_grammars
path = os.path.join(os.path.dirname(__file__), file) + '.txt'
try:
return _loaded_grammars[path]
except KeyError:
return _loaded_grammars.setdefault(path, generate_grammar(path))
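# A minimal usage sketch added for illustration; it is not part of the original
# module. The source string is a made-up example.
def _example_parse_module():
    """Illustrative only: load a grammar, parse a snippet and return the module node."""
    grammar = load_grammar('grammar3.4')
    parser = Parser(grammar, u"def f():\n    return 1\n")
    return parser.module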
class ErrorStatement(object):
def __init__(self, stack, next_token, position_modifier, next_start_pos):
self.stack = stack
self._position_modifier = position_modifier
self.next_token = next_token
self._next_start_pos = next_start_pos
@property
def next_start_pos(self):
s = self._next_start_pos
return s[0] + self._position_modifier.line, s[1]
@property
def first_pos(self):
first_type, nodes = self.stack[0]
return nodes[0].start_pos
@property
def first_type(self):
first_type, nodes = self.stack[0]
return first_type
class ParserSyntaxError(object):
def __init__(self, message, position):
self.message = message
self.position = position
class Parser(object):
"""
This class is used to parse a Python file, it then divides them into a
class structure of different scopes.
:param grammar: The grammar object of pgen2. Loaded by load_grammar.
:param source: The codebase for the parser. Must be unicode.
:param module_path: The path of the module in the file system, may be None.
:type module_path: str
:param top_module: Use this module as a parent instead of `self.module`.
"""
def __init__(self, grammar, source, module_path=None, tokenizer=None):
self._ast_mapping = {
'expr_stmt': pt.ExprStmt,
'classdef': pt.Class,
'funcdef': pt.Function,
'file_input': pt.Module,
'import_name': pt.ImportName,
'import_from': pt.ImportFrom,
'break_stmt': pt.KeywordStatement,
'continue_stmt': pt.KeywordStatement,
'return_stmt': pt.ReturnStmt,
'raise_stmt': pt.KeywordStatement,
'yield_expr': pt.YieldExpr,
'del_stmt': pt.KeywordStatement,
'pass_stmt': pt.KeywordStatement,
'global_stmt': pt.GlobalStmt,
'nonlocal_stmt': pt.KeywordStatement,
'assert_stmt': pt.AssertStmt,
'if_stmt': pt.IfStmt,
'with_stmt': pt.WithStmt,
'for_stmt': pt.ForStmt,
'while_stmt': pt.WhileStmt,
'try_stmt': pt.TryStmt,
'comp_for': pt.CompFor,
'decorator': pt.Decorator,
'lambdef': pt.Lambda,
'old_lambdef': pt.Lambda,
'lambdef_nocond': pt.Lambda,
}
self.syntax_errors = []
self._global_names = []
self._omit_dedent_list = []
self._indent_counter = 0
self._last_failed_start_pos = (0, 0)
# TODO do print absolute import detection here.
#try:
# del python_grammar_no_print_statement.keywords["print"]
#except KeyError:
# pass # Doesn't exist in the Python 3 grammar.
#if self.options["print_function"]:
# python_grammar = pygram.python_grammar_no_print_statement
#else:
self._used_names = {}
self._scope_names_stack = [{}]
self._error_statement_stacks = []
added_newline = False
# The Python grammar needs a newline at the end of each statement.
if not source.endswith('\n'):
source += '\n'
added_newline = True
# For the fast parser.
self.position_modifier = pt.PositionModifier()
p = PgenParser(grammar, self.convert_node, self.convert_leaf,
self.error_recovery)
tokenizer = tokenizer or tokenize.source_tokens(source)
self.module = p.parse(self._tokenize(tokenizer))
if self.module.type != 'file_input':
# If there's only one statement, we get back a non-module. That's
# not what we want, we want a module, so we add it here:
self.module = self.convert_node(grammar,
grammar.symbol2number['file_input'],
[self.module])
if added_newline:
self.remove_last_newline()
self.module.used_names = self._used_names
self.module.path = module_path
self.module.global_names = self._global_names
self.module.error_statement_stacks = self._error_statement_stacks
def convert_node(self, grammar, type, children):
"""
Convert raw node information to a Node instance.
This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
symbol = grammar.number2symbol[type]
try:
new_node = self._ast_mapping[symbol](children)
except KeyError:
new_node = pt.Node(symbol, children)
# We need to check raw_node always, because the same node can be
# returned by convert multiple times.
if symbol == 'global_stmt':
self._global_names += new_node.get_global_names()
elif isinstance(new_node, pt.Lambda):
new_node.names_dict = self._scope_names_stack.pop()
elif isinstance(new_node, (pt.ClassOrFunc, pt.Module)) \
and symbol in ('funcdef', 'classdef', 'file_input'):
# scope_name_stack handling
scope_names = self._scope_names_stack.pop()
if isinstance(new_node, pt.ClassOrFunc):
n = new_node.name
scope_names[n.value].remove(n)
# Set the func name of the current node
arr = self._scope_names_stack[-1].setdefault(n.value, [])
arr.append(n)
new_node.names_dict = scope_names
elif isinstance(new_node, pt.CompFor):
            # The name definitions of comprehensions shouldn't be part of the
# current scope. They are part of the comprehension scope.
for n in new_node.get_defined_names():
self._scope_names_stack[-1][n.value].remove(n)
return new_node
def convert_leaf(self, grammar, type, value, prefix, start_pos):
#print('leaf', value, pytree.type_repr(type))
if type == tokenize.NAME:
if value in grammar.keywords:
if value in ('def', 'class', 'lambda'):
self._scope_names_stack.append({})
return pt.Keyword(self.position_modifier, value, start_pos, prefix)
else:
name = pt.Name(self.position_modifier, value, start_pos, prefix)
# Keep a listing of all used names
arr = self._used_names.setdefault(name.value, [])
arr.append(name)
arr = self._scope_names_stack[-1].setdefault(name.value, [])
arr.append(name)
return name
elif type == STRING:
return pt.String(self.position_modifier, value, start_pos, prefix)
elif type == NUMBER:
return pt.Number(self.position_modifier, value, start_pos, prefix)
elif type in (NEWLINE, ENDMARKER):
return pt.Whitespace(self.position_modifier, value, start_pos, prefix)
else:
return pt.Operator(self.position_modifier, value, start_pos, prefix)
def error_recovery(self, grammar, stack, typ, value, start_pos, prefix,
add_token_callback):
"""
This parser is written in a dynamic way, meaning that this parser
allows using different grammars (even non-Python). However, error
recovery is purely written for Python.
"""
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for index, (dfa, state, (typ, nodes)) in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
symbol = grammar.number2symbol[typ]
if symbol == 'file_input':
break
elif symbol == 'suite' and len(nodes) > 1:
# suites without an indent in them get discarded.
break
elif symbol == 'simple_stmt' and len(nodes) > 1:
# simple_stmt can just be turned into a Node, if there are
# enough statements. Ignore the rest after that.
break
return index, symbol, nodes
index, symbol, nodes = current_suite(stack)
if symbol == 'simple_stmt':
index -= 2
(_, _, (typ, suite_nodes)) = stack[index]
symbol = grammar.number2symbol[typ]
suite_nodes.append(pt.Node(symbol, list(nodes)))
# Remove
nodes[:] = []
nodes = suite_nodes
stack[index]
#print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index)
self._stack_removal(grammar, stack, index + 1, value, start_pos)
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
if value in ('import', 'from', 'class', 'def', 'try', 'while', 'return'):
# Those can always be new statements.
add_token_callback(typ, value, prefix, start_pos)
elif typ == DEDENT and symbol == 'suite':
# Close the current suite, with DEDENT.
# Note that this may cause some suites to not contain any
# statements at all. This is contrary to valid Python syntax. We
# keep incomplete suites in Jedi to be able to complete param names
# or `with ... as foo` names. If we want to use this parser for
# syntax checks, we have to check in a separate turn if suites
# contain statements or not. However, a second check is necessary
# anyway (compile.c does that for Python), because Python's grammar
# doesn't stop you from defining `continue` in a module, etc.
add_token_callback(typ, value, prefix, start_pos)
def _stack_removal(self, grammar, stack, start_index, value, start_pos):
def clear_names(children):
for c in children:
try:
clear_names(c.children)
except AttributeError:
if isinstance(c, pt.Name):
try:
self._scope_names_stack[-1][c.value].remove(c)
self._used_names[c.value].remove(c)
except ValueError:
pass # This may happen with CompFor.
for dfa, state, node in stack[start_index:]:
clear_names(children=node[1])
failed_stack = []
found = False
for dfa, state, (typ, nodes) in stack[start_index:]:
if nodes:
found = True
if found:
symbol = grammar.number2symbol[typ]
failed_stack.append((symbol, nodes))
if nodes and nodes[0] in ('def', 'class', 'lambda'):
self._scope_names_stack.pop()
if failed_stack:
err = ErrorStatement(failed_stack, value, self.position_modifier, start_pos)
self._error_statement_stacks.append(err)
self._last_failed_start_pos = start_pos
stack[start_index:] = []
def _tokenize(self, tokenizer):
for typ, value, start_pos, prefix in tokenizer:
#print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
continue
self._indent_counter -= 1
elif typ == INDENT:
self._indent_counter += 1
elif typ == ERRORTOKEN:
self._add_syntax_error('Strange token', start_pos)
continue
if typ == OP:
typ = token.opmap[value]
yield typ, value, prefix, start_pos
def _add_syntax_error(self, message, position):
self.syntax_errors.append(ParserSyntaxError(message, position))
def __repr__(self):
return "<%s: %s>" % (type(self).__name__, self.module)
def remove_last_newline(self):
"""
In all of this we need to work with _start_pos, because if we worked
with start_pos, we would need to check the position_modifier as well
(which is accounted for in the start_pos property).
"""
endmarker = self.module.children[-1]
# The newline is either in the endmarker as a prefix or the previous
# leaf as a newline token.
if endmarker.prefix.endswith('\n'):
endmarker.prefix = endmarker.prefix[:-1]
last_line = re.sub('.*\n', '', endmarker.prefix)
endmarker._start_pos = endmarker._start_pos[0] - 1, len(last_line)
else:
try:
newline = endmarker.get_previous()
except IndexError:
return # This means that the parser is empty.
while True:
if newline.value == '':
# Must be a DEDENT, just continue.
try:
newline = newline.get_previous()
except IndexError:
# If there's a statement that fails to be parsed, there
# will be no previous leaf. So just ignore it.
break
elif newline.value != '\n':
# This may happen if error correction strikes and removes
# a whole statement including '\n'.
break
else:
newline.value = ''
if self._last_failed_start_pos > newline._start_pos:
# It may be the case that there was a syntax error in a
# function. In that case error correction removes the
# right newline. So we use the previously assigned
# _last_failed_start_pos variable to account for that.
endmarker._start_pos = self._last_failed_start_pos
else:
endmarker._start_pos = newline._start_pos
break
| gpl-3.0 | 6,763,392,414,066,380,000 | 41.75827 | 88 | 0.571709 | false |
bigdatauniversity/edx-platform | lms/lib/courseware_search/lms_filter_generator.py | 43 | 2452 | """
This file contains an implementation override of SearchFilterGenerator which will allow
* Filtering by all courses in which the user is enrolled
"""
from microsite_configuration import microsite
from student.models import CourseEnrollment
from search.filter_generator import SearchFilterGenerator
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme
INCLUDE_SCHEMES = [CohortPartitionScheme, RandomUserPartitionScheme, ]
SCHEME_SUPPORTS_ASSIGNMENT = [RandomUserPartitionScheme, ]
class LmsSearchFilterGenerator(SearchFilterGenerator):
""" SearchFilterGenerator for LMS Search """
_user_enrollments = {}
def _enrollments_for_user(self, user):
""" Return the specified user's course enrollments """
if user not in self._user_enrollments:
self._user_enrollments[user] = CourseEnrollment.enrollments_for_user(user)
return self._user_enrollments[user]
def field_dictionary(self, **kwargs):
""" add course if provided otherwise add courses in which the user is enrolled in """
field_dictionary = super(LmsSearchFilterGenerator, self).field_dictionary(**kwargs)
if not kwargs.get('user'):
field_dictionary['course'] = []
elif not kwargs.get('course_id'):
user_enrollments = self._enrollments_for_user(kwargs['user'])
field_dictionary['course'] = [unicode(enrollment.course_id) for enrollment in user_enrollments]
# if we have an org filter, only include results for this org filter
course_org_filter = microsite.get_value('course_org_filter')
if course_org_filter:
field_dictionary['org'] = course_org_filter
return field_dictionary
def exclude_dictionary(self, **kwargs):
""" If we are not on a microsite, then exclude any microsites that are defined """
exclude_dictionary = super(LmsSearchFilterGenerator, self).exclude_dictionary(**kwargs)
course_org_filter = microsite.get_value('course_org_filter')
# If we have a course filter we are ensuring that we only get those courses above
if not course_org_filter:
org_filter_out_set = microsite.get_all_orgs()
if org_filter_out_set:
exclude_dictionary['org'] = list(org_filter_out_set)
return exclude_dictionary
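# A minimal usage sketch added for illustration; it is not part of the original
# module. The user argument is assumed to be a Django user with enrollments; the
# returned dictionary restricts search results to that user's course ids (plus any
# configured org filter).
def _example_field_dictionary(user):
    generator = LmsSearchFilterGenerator()
    return generator.field_dictionary(user=user)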
| agpl-3.0 | 4,829,496,862,452,984,000 | 44.407407 | 107 | 0.705546 | false |
Mirantis/mos-horizon | openstack_dashboard/templatetags/themes.py | 14 | 2548 | # Copyright 2016 Hewlett Packard Enterprise Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
from six.moves.urllib.request import pathname2url
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django import template
from horizon import themes as hz_themes
register = template.Library()
def get_theme(request):
this_theme = hz_themes.get_default_theme()
try:
theme = request.COOKIES[hz_themes.get_theme_cookie_name()]
for each_theme in hz_themes.get_themes():
if theme == each_theme[0]:
this_theme = each_theme[0]
except KeyError:
pass
return this_theme
def find_asset(theme, asset):
theme_path = ''
for name, label, path in hz_themes.get_themes():
if theme == name:
theme_path = path
theme_path = os.path.join(settings.ROOT_PATH, theme_path)
# If there is a 'static' subdir of the theme, then use
# that as the theme's asset root path
static_path = os.path.join(theme_path, 'static')
if os.path.exists(static_path):
theme_path = static_path
# The full path to the asset requested
asset_path = os.path.join(theme_path, asset)
if os.path.exists(asset_path):
return_path = os.path.join(hz_themes.get_theme_dir(), theme, asset)
else:
return_path = os.path.join('dashboard', asset)
return staticfiles_storage.url(pathname2url(return_path))
@register.assignment_tag()
def themes():
return hz_themes.get_themes()
@register.assignment_tag()
def theme_cookie():
return hz_themes.get_theme_cookie_name()
@register.assignment_tag()
def theme_dir():
return hz_themes.get_theme_dir()
@register.assignment_tag(takes_context=True)
def current_theme(context):
return get_theme(context.request)
@register.simple_tag(takes_context=True)
def themable_asset(context, asset):
return find_asset(get_theme(context.request), asset)
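# A minimal usage sketch added for illustration; it is not part of the original
# module. The asset path is a hypothetical example of how these tags are used from
# a Django template:
#
#     {% load themes %}
#     {% current_theme as theme %}
#     <link rel="stylesheet" href="{% themable_asset 'css/custom.css' %}">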
| apache-2.0 | -6,980,886,957,284,060,000 | 27 | 75 | 0.704474 | false |
richard-willowit/odoo | addons/web/models/ir_http.py | 6 | 2130 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import models
from odoo.http import request
import odoo
class Http(models.AbstractModel):
_inherit = 'ir.http'
def webclient_rendering_context(self):
return {
'menu_data': request.env['ir.ui.menu'].load_menus(request.debug),
'session_info': json.dumps(self.session_info()),
}
def session_info(self):
user = request.env.user
display_switch_company_menu = user.has_group('base.group_multi_company') and len(user.company_ids) > 1
version_info = odoo.service.common.exp_version()
return {
"session_id": request.session.sid,
"uid": request.session.uid,
"is_system": request.env.user._is_system(),
"is_superuser": request.env.user._is_superuser(),
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"server_version": version_info.get('server_version'),
"server_version_info": version_info.get('server_version_info'),
"name": user.name,
"username": user.login,
"company_id": request.env.user.company_id.id if request.session.uid else None,
"partner_id": request.env.user.partner_id.id if request.session.uid and request.env.user.partner_id else None,
"user_companies": {'current_company': (user.company_id.id, user.company_id.name), 'allowed_companies': [(comp.id, comp.name) for comp in user.company_ids]} if display_switch_company_menu else False,
"currencies": self.get_currencies(),
"web.base.url": self.env['ir.config_parameter'].sudo().get_param('web.base.url', default=''),
}
def get_currencies(self):
Currency = request.env['res.currency']
currencies = Currency.search([]).read(['symbol', 'position', 'decimal_places'])
return { c['id']: {'symbol': c['symbol'], 'position': c['position'], 'digits': [69,c['decimal_places']]} for c in currencies}
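# A minimal usage sketch added for illustration; it is not part of the original
# module. A custom addon could extend the payload by inheriting 'ir.http' and
# overriding session_info(), e.g.:
#
#     class Http(models.AbstractModel):
#         _inherit = 'ir.http'
#
#         def session_info(self):
#             info = super(Http, self).session_info()
#             info['example_flag'] = True  # made-up key for illustration
#             return info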
| gpl-3.0 | 8,066,572,760,612,700,000 | 45.304348 | 210 | 0.621596 | false |
alope107/nbgrader | nbgrader/tests/api/test_gradebook.py | 4 | 25911 | import pytest
from datetime import datetime
from nbgrader import api
from nbgrader import utils
from nbgrader.api import InvalidEntry, MissingEntry
@pytest.fixture
def gradebook(request):
gb = api.Gradebook("sqlite:///:memory:")
def fin():
gb.db.close()
request.addfinalizer(fin)
return gb
@pytest.fixture
def assignment(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='markdown')
gradebook.add_solution_cell('solution1', 'p1', 'foo')
gradebook.add_solution_cell('test2', 'p1', 'foo')
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type='code')
gradebook.add_source_cell('test2', 'p1', 'foo', cell_type='markdown')
gradebook.add_source_cell('solution1', 'p1', 'foo', cell_type='code')
return gradebook
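# Editor's note (not part of the original tests): the fixture above builds one
# assignment "foo" with a single notebook "p1" holding two grade cells worth
# 1 + 2 = 3 points, two solution cells and three source cells; the removal tests
# below rely on exactly this structure.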
def test_init(gradebook):
assert gradebook.students == []
assert gradebook.assignments == []
#### Test students
def test_add_student(gradebook):
s = gradebook.add_student('12345')
assert s.id == '12345'
assert gradebook.students == [s]
# try adding a duplicate student
with pytest.raises(InvalidEntry):
gradebook.add_student('12345')
# try adding a student with arguments
s = gradebook.add_student('6789', last_name="Bar", first_name="Foo", email="[email protected]")
assert s.id == '6789'
assert s.last_name == "Bar"
assert s.first_name == "Foo"
assert s.email == "[email protected]"
def test_add_duplicate_student(gradebook):
# we also need this test because this will cause an IntegrityError
# under the hood rather than a FlushError
gradebook.add_student('12345')
with pytest.raises(InvalidEntry):
gradebook.add_student('12345')
def test_find_student(gradebook):
s1 = gradebook.add_student('12345')
assert gradebook.find_student('12345') == s1
s2 = gradebook.add_student('abcd')
assert gradebook.find_student('12345') == s1
assert gradebook.find_student('abcd') == s2
def test_find_nonexistant_student(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_student('12345')
def test_remove_student(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
assignment.remove_student('hacker123')
with pytest.raises(MissingEntry):
assignment.find_submission('foo', 'hacker123')
with pytest.raises(MissingEntry):
assignment.find_student('hacker123')
def test_update_or_create_student(gradebook):
# first test creating it
s1 = gradebook.update_or_create_student('hacker123')
assert gradebook.find_student('hacker123') == s1
assert s1.first_name is None
# now test finding/updating it
s2 = gradebook.update_or_create_student('hacker123', first_name='Alyssa')
assert s1 == s2
assert s2.first_name == 'Alyssa'
#### Test assignments
def test_add_assignment(gradebook):
a = gradebook.add_assignment('foo')
assert a.name == 'foo'
assert gradebook.assignments == [a]
# try adding a duplicate assignment
with pytest.raises(InvalidEntry):
gradebook.add_assignment('foo')
# try adding an assignment with arguments
now = datetime.now()
a = gradebook.add_assignment('bar', duedate=now)
assert a.name == 'bar'
assert a.duedate == now
# try adding with a string timestamp
a = gradebook.add_assignment('baz', duedate=now.isoformat())
assert a.name == 'baz'
assert a.duedate == now
def test_add_duplicate_assignment(gradebook):
gradebook.add_assignment('foo')
with pytest.raises(InvalidEntry):
gradebook.add_assignment('foo')
def test_find_assignment(gradebook):
a1 = gradebook.add_assignment('foo')
assert gradebook.find_assignment('foo') == a1
a2 = gradebook.add_assignment('bar')
assert gradebook.find_assignment('foo') == a1
assert gradebook.find_assignment('bar') == a2
def test_find_nonexistant_assignment(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_assignment('foo')
def test_remove_assignment(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
notebooks = assignment.find_assignment('foo').notebooks
grade_cells = [x for nb in notebooks for x in nb.grade_cells]
solution_cells = [x for nb in notebooks for x in nb.solution_cells]
source_cells = [x for nb in notebooks for x in nb.source_cells]
assignment.remove_assignment('foo')
for nb in notebooks:
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade_cell in grade_cells:
assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == []
for solution_cell in solution_cells:
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == []
for source_cell in source_cells:
assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_assignment('foo')
assert assignment.find_student('hacker123').submissions == []
def test_update_or_create_assignment(gradebook):
# first test creating it
a1 = gradebook.update_or_create_assignment('foo')
assert gradebook.find_assignment('foo') == a1
assert a1.duedate is None
# now test finding/updating it
a2 = gradebook.update_or_create_assignment('foo', duedate="2015-02-02 14:58:23.948203 PST")
assert a1 == a2
assert a2.duedate == utils.parse_utc("2015-02-02 14:58:23.948203 PST")
#### Test notebooks
def test_add_notebook(gradebook):
a = gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
assert n.name == 'p1'
assert n.assignment == a
assert a.notebooks == [n]
    # try adding a duplicate notebook
with pytest.raises(InvalidEntry):
gradebook.add_notebook('p1', 'foo')
def test_add_duplicate_notebook(gradebook):
# it should be ok to add a notebook with the same name, as long as
# it's for different assignments
gradebook.add_assignment('foo')
gradebook.add_assignment('bar')
n1 = gradebook.add_notebook('p1', 'foo')
n2 = gradebook.add_notebook('p1', 'bar')
assert n1.id != n2.id
# but not ok to add a notebook with the same name for the same assignment
with pytest.raises(InvalidEntry):
gradebook.add_notebook('p1', 'foo')
def test_find_notebook(gradebook):
gradebook.add_assignment('foo')
n1 = gradebook.add_notebook('p1', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
n2 = gradebook.add_notebook('p2', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
assert gradebook.find_notebook('p2', 'foo') == n2
def test_find_nonexistant_notebook(gradebook):
# check that it doesn't find it when there is nothing in the db
with pytest.raises(MissingEntry):
gradebook.find_notebook('p1', 'foo')
# check that it doesn't find it even if the assignment exists
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_notebook('p1', 'foo')
def test_update_or_create_notebook(gradebook):
# first test creating it
gradebook.add_assignment('foo')
n1 = gradebook.update_or_create_notebook('p1', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
# now test finding/updating it
n2 = gradebook.update_or_create_notebook('p1', 'foo')
assert n1 == n2
def test_remove_notebook(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
notebooks = assignment.find_assignment('foo').notebooks
for nb in notebooks:
grade_cells = [x for x in nb.grade_cells]
solution_cells = [x for x in nb.solution_cells]
source_cells = [x for x in nb.source_cells]
assignment.remove_notebook(nb.name, 'foo')
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade_cell in grade_cells:
assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == []
for solution_cell in solution_cells:
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == []
for source_cell in source_cells:
assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_notebook(nb.name, 'foo')
#### Test grade cells
def test_add_grade_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
gc = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
assert gc.name == 'test1'
assert gc.max_score == 2
assert gc.cell_type == 'markdown'
assert n.grade_cells == [gc]
assert gc.notebook == n
def test_add_grade_cell_with_args(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc = gradebook.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, cell_type="code")
assert gc.name == 'test1'
assert gc.max_score == 3
assert gc.cell_type == "code"
def test_create_invalid_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, cell_type="something")
def test_add_duplicate_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
with pytest.raises(InvalidEntry):
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
def test_find_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc1 = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
gc2 = gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='code')
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
assert gradebook.find_grade_cell('test2', 'p1', 'foo') == gc2
def test_find_nonexistant_grade_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
def test_update_or_create_grade_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc1 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='code')
assert gc1.max_score == 2
assert gc1.cell_type == 'code'
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
# now test finding/updating it
gc2 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=3)
assert gc1 == gc2
assert gc1.max_score == 3
assert gc1.cell_type == 'code'
#### Test solution cells
def test_add_solution_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_solution_cell('test1', 'p1', 'foo')
assert sc.name == 'test1'
assert n.solution_cells == [sc]
assert sc.notebook == n
def test_add_duplicate_solution_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_solution_cell('test1', 'p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_solution_cell('test1', 'p1', 'foo')
def test_find_solution_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.add_solution_cell('test1', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
sc2 = gradebook.add_solution_cell('test2', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
assert gradebook.find_solution_cell('test2', 'p1', 'foo') == sc2
def test_find_nonexistant_solution_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
def test_update_or_create_solution_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
# now test finding/updating it
sc2 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo')
assert sc1 == sc2
#### Test source cells
def test_add_source_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
assert sc.name == 'test1'
assert sc.cell_type == 'code'
assert n.source_cells == [sc]
assert sc.notebook == n
def test_add_source_cell_with_args(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_source_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="code", checksum="abcde")
assert sc.name == 'test1'
assert sc.source == "blah blah blah"
assert sc.cell_type == "code"
assert sc.checksum == "abcde"
def test_create_invalid_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_source_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="something", checksum="abcde")
def test_add_duplicate_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
with pytest.raises(InvalidEntry):
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
def test_find_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
sc2 = gradebook.add_source_cell('test2', 'p1', 'foo', cell_type="code")
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
assert gradebook.find_source_cell('test2', 'p1', 'foo') == sc2
def test_find_nonexistant_source_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
def test_update_or_create_source_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', cell_type='code')
assert sc1.cell_type == 'code'
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
# now test finding/updating it
    assert sc1.checksum is None
sc2 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', checksum="123456")
assert sc1 == sc2
assert sc1.cell_type == 'code'
assert sc1.checksum == "123456"
#### Test submissions
def test_add_submission(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
s1 = assignment.add_submission('foo', 'hacker123')
s2 = assignment.add_submission('foo', 'bitdiddle')
assert assignment.assignment_submissions('foo') == [s2, s1]
assert assignment.student_submissions('hacker123') == [s1]
assert assignment.student_submissions('bitdiddle') == [s2]
assert assignment.find_submission('foo', 'hacker123') == s1
assert assignment.find_submission('foo', 'bitdiddle') == s2
def test_add_duplicate_submission(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
with pytest.raises(InvalidEntry):
assignment.add_submission('foo', 'hacker123')
def test_remove_submission(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
submission = assignment.find_submission('foo', 'hacker123')
notebooks = submission.notebooks
grades = [x for nb in notebooks for x in nb.grades]
comments = [x for nb in notebooks for x in nb.comments]
assignment.remove_submission('foo', 'hacker123')
for nb in notebooks:
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade in grades:
assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == []
for comment in comments:
assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_submission('foo', 'hacker123')
def test_update_or_create_submission(assignment):
assignment.add_student('hacker123')
s1 = assignment.update_or_create_submission('foo', 'hacker123')
assert s1.timestamp is None
s2 = assignment.update_or_create_submission('foo', 'hacker123', timestamp="2015-02-02 14:58:23.948203 PST")
assert s1 == s2
assert s2.timestamp == utils.parse_utc("2015-02-02 14:58:23.948203 PST")
def test_find_submission_notebook(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
with pytest.raises(MissingEntry):
assignment.find_submission_notebook('p2', 'foo', 'hacker123')
n2 = assignment.find_submission_notebook('p1', 'foo', 'hacker123')
assert n1 == n2
def test_find_submission_notebook_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
with pytest.raises(MissingEntry):
assignment.find_submission_notebook_by_id('12345')
n2 = assignment.find_submission_notebook_by_id(n1.id)
assert n1 == n2
def test_remove_submission_notebook(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
submission = assignment.find_submission('foo', 'hacker123')
notebooks = submission.notebooks
for nb in notebooks:
grades = [x for x in nb.grades]
comments = [x for x in nb.comments]
assignment.remove_submission_notebook(nb.name, 'foo', 'hacker123')
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade in grades:
assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == []
for comment in comments:
assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_submission_notebook(nb.name, 'foo', 'hacker123')
def test_find_grade(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
grades = n1.grades
for g1 in grades:
g2 = assignment.find_grade(g1.name, 'p1', 'foo', 'hacker123')
assert g1 == g2
with pytest.raises(MissingEntry):
assignment.find_grade('asdf', 'p1', 'foo', 'hacker123')
def test_find_grade_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
grades = n1.grades
for g1 in grades:
g2 = assignment.find_grade_by_id(g1.id)
assert g1 == g2
with pytest.raises(MissingEntry):
assignment.find_grade_by_id('12345')
def test_find_comment(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
comments = n1.comments
for c1 in comments:
c2 = assignment.find_comment(c1.name, 'p1', 'foo', 'hacker123')
assert c1 == c2
with pytest.raises(MissingEntry):
assignment.find_comment('asdf', 'p1', 'foo', 'hacker123')
def test_find_comment_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
comments = n1.comments
for c1 in comments:
c2 = assignment.find_comment_by_id(c1.id)
assert c1 == c2
with pytest.raises(MissingEntry):
assignment.find_comment_by_id('12345')
### Test average scores
def test_average_assignment_score(assignment):
assert assignment.average_assignment_score('foo') == 0.0
assert assignment.average_assignment_code_score('foo') == 0.0
assert assignment.average_assignment_written_score('foo') == 0.0
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
assert assignment.average_assignment_score('foo') == 0.0
assert assignment.average_assignment_code_score('foo') == 0.0
assert assignment.average_assignment_written_score('foo') == 0.0
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
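    # expected averages: hacker123 scored 0.5 + 2 = 2.5 and bitdiddle 1 + 1 = 2,
    # so the overall average is (2.5 + 2) / 2 == 2.25; the code/written splits
    # ((0.5 + 1) / 2 == 0.75 and (2 + 1) / 2 == 1.5) assume the fixture marks
    # test1 as a code cell and test2 as a written (markdown) cell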
assert assignment.average_assignment_score('foo') == 2.25
assert assignment.average_assignment_code_score('foo') == 0.75
assert assignment.average_assignment_written_score('foo') == 1.5
def test_average_notebook_score(assignment):
assert assignment.average_notebook_score('p1', 'foo') == 0
assert assignment.average_notebook_code_score('p1', 'foo') == 0
assert assignment.average_notebook_written_score('p1', 'foo') == 0
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
assert assignment.average_notebook_score('p1', 'foo') == 0.0
assert assignment.average_notebook_code_score('p1', 'foo') == 0.0
assert assignment.average_notebook_written_score('p1', 'foo') == 0.0
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
assert assignment.average_notebook_score('p1', 'foo') == 2.25
assert assignment.average_notebook_code_score('p1', 'foo') == 0.75
assert assignment.average_notebook_written_score('p1', 'foo') == 1.5
## Test mass dictionary queries
def test_student_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_student('louisreasoner')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
students = assignment.student_dicts()
a = sorted(students, key=lambda x: x["id"])
b = sorted([x.to_dict() for x in assignment.students], key=lambda x: x["id"])
assert a == b
def test_notebook_submission_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
s1 = assignment.add_submission('foo', 'hacker123')
s2 = assignment.add_submission('foo', 'bitdiddle')
s1.flagged = True
s2.flagged = False
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
notebook = assignment.find_notebook("p1", "foo")
submissions = assignment.notebook_submission_dicts("p1", "foo")
a = sorted(submissions, key=lambda x: x["id"])
b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"])
assert a == b
| bsd-3-clause | -6,153,616,097,258,163,000 | 33.364721 | 116 | 0.662576 | false |
tojon/treeherder | treeherder/webapp/api/performance_data.py | 2 | 14534 | import datetime
import time
from collections import defaultdict
import django_filters
from django.conf import settings
from rest_framework import (exceptions,
filters,
pagination,
viewsets)
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from treeherder.model import models
from treeherder.perf.alerts import get_alert_properties
from treeherder.perf.models import (PerformanceAlert,
PerformanceAlertSummary,
PerformanceBugTemplate,
PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
from treeherder.webapp.api.permissions import IsStaffOrReadOnly
from .performance_serializers import (PerformanceAlertSerializer,
PerformanceAlertSummarySerializer,
PerformanceBugTemplateSerializer,
PerformanceFrameworkSerializer)
class PerformanceSignatureViewSet(viewsets.ViewSet):
def list(self, request, project):
repository = models.Repository.objects.get(name=project)
signature_data = PerformanceSignature.objects.filter(
repository=repository).select_related(
'parent_signature__signature_hash', 'option_collection',
'platform')
parent_signature_hashes = request.query_params.getlist('parent_signature')
if parent_signature_hashes:
parent_signatures = PerformanceSignature.objects.filter(
repository=repository,
signature_hash__in=parent_signature_hashes)
signature_data = signature_data.filter(
parent_signature__in=parent_signatures)
if not int(request.query_params.get('subtests', True)):
signature_data = signature_data.filter(parent_signature__isnull=True)
signature_ids = request.query_params.getlist('id')
if signature_ids:
signature_data = signature_data.filter(id__in=map(int,
signature_ids))
signature_hashes = request.query_params.getlist('signature')
if signature_hashes:
signature_data = signature_data.filter(
signature_hash__in=signature_hashes)
frameworks = request.query_params.getlist('framework')
if frameworks:
signature_data = signature_data.filter(
framework__in=frameworks)
interval = request.query_params.get('interval')
        start_date = request.query_params.get('start_date') # 'YYYY-MM-DDTHH:MM:SS'
end_date = request.query_params.get('end_date') # 'YYYY-MM-DDTHH:MM:SS'
if interval and (start_date or end_date):
return Response({"message": "Provide either interval only -or- start (and end) date"},
status=HTTP_400_BAD_REQUEST)
if interval:
signature_data = signature_data.filter(
last_updated__gte=datetime.datetime.utcfromtimestamp(
int(time.time() - int(interval))))
if start_date:
signature_data = signature_data.filter(last_updated__gte=start_date)
if end_date:
signature_data = signature_data.filter(last_updated__lte=end_date)
platform = request.query_params.get('platform')
if platform:
platforms = models.MachinePlatform.objects.filter(
platform=platform)
signature_data = signature_data.filter(
platform__in=platforms)
ret = {}
for (id, signature_hash, option_collection_hash, platform, framework,
suite, test, lower_is_better, extra_options,
has_subtests, parent_signature_hash) in signature_data.values_list(
'id',
'signature_hash',
'option_collection__option_collection_hash',
'platform__platform', 'framework', 'suite',
'test', 'lower_is_better',
'extra_options', 'has_subtests',
'parent_signature__signature_hash').distinct():
ret[signature_hash] = {
'id': id,
'framework_id': framework,
'option_collection_hash': option_collection_hash,
'machine_platform': platform,
'suite': suite
}
if not lower_is_better:
                # almost always true, save some bandwidth by assuming that by
# default
ret[signature_hash]['lower_is_better'] = False
if test:
# test may be empty in case of a summary test, leave it empty
# then
ret[signature_hash]['test'] = test
if has_subtests:
ret[signature_hash]['has_subtests'] = True
if parent_signature_hash:
# this value is often null, save some bandwidth by excluding
# it if not present
ret[signature_hash]['parent_signature'] = parent_signature_hash
if extra_options:
# extra_options stored as charField but api returns as list
ret[signature_hash]['extra_options'] = extra_options.split(' ')
return Response(ret)
class PerformancePlatformViewSet(viewsets.ViewSet):
"""
All platforms for a particular branch that have performance data
"""
def list(self, request, project):
signature_data = PerformanceSignature.objects.filter(
repository__name=project)
interval = request.query_params.get('interval')
if interval:
signature_data = signature_data.filter(
last_updated__gte=datetime.datetime.utcfromtimestamp(
int(time.time() - int(interval))))
frameworks = request.query_params.getlist('framework')
if frameworks:
signature_data = signature_data.filter(
framework__in=frameworks)
return Response(signature_data.values_list(
'platform__platform', flat=True).distinct())
class PerformanceFrameworkViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceFramework.objects.all()
serializer_class = PerformanceFrameworkSerializer
filter_backends = [filters.OrderingFilter]
ordering = 'id'
class PerformanceDatumViewSet(viewsets.ViewSet):
"""
This view serves performance test result data
"""
def list(self, request, project):
repository = models.Repository.objects.get(name=project)
signature_hashes = request.query_params.getlist("signatures")
push_ids = request.query_params.getlist("push_id")
try:
job_ids = [int(job_id) for job_id in
request.query_params.getlist("job_id")]
except ValueError:
return Response({"message": "Job id(s) must be specified as integers"},
status=HTTP_400_BAD_REQUEST)
if not (signature_hashes or push_ids or job_ids):
raise exceptions.ValidationError('Need to specify either '
'signatures, push_id, or '
'job_id')
datums = PerformanceDatum.objects.filter(
repository=repository).select_related(
'signature__signature_hash').order_by('push_timestamp')
if signature_hashes:
signature_ids = PerformanceSignature.objects.filter(
repository=repository,
signature_hash__in=signature_hashes).values_list('id', flat=True)
datums = datums.filter(signature__id__in=list(signature_ids))
if push_ids:
datums = datums.filter(push_id__in=push_ids)
if job_ids:
datums = datums.filter(job_id__in=job_ids)
frameworks = request.query_params.getlist('framework')
if frameworks:
datums = datums.filter(
signature__framework__in=frameworks)
interval = request.query_params.get('interval')
        start_date = request.query_params.get('start_date') # 'YYYY-MM-DDTHH:MM:SS'
end_date = request.query_params.get('end_date') # 'YYYY-MM-DDTHH:MM:SS'
if interval and (start_date or end_date):
return Response({"message": "Provide either interval only -or- start (and end) date"},
status=HTTP_400_BAD_REQUEST)
if interval:
datums = datums.filter(
push_timestamp__gt=datetime.datetime.utcfromtimestamp(
int(time.time() - int(interval))))
if start_date:
datums = datums.filter(push_timestamp__gt=start_date)
if end_date:
datums = datums.filter(push_timestamp__lt=end_date)
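        # build the response as a mapping of signature hash -> list of datum
        # dicts, converting each push timestamp to Unix epoch seconds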
ret = defaultdict(list)
values_list = datums.values_list(
'id', 'signature_id', 'signature__signature_hash', 'job_id', 'push_id',
'push_timestamp', 'value')
for (id, signature_id, signature_hash, job_id, push_id,
push_timestamp, value) in values_list:
ret[signature_hash].append({
'id': id,
'signature_id': signature_id,
'job_id': job_id,
'push_id': push_id,
'push_timestamp': int(time.mktime(push_timestamp.timetuple())),
'value': round(value, 2) # round to 2 decimal places
})
return Response(ret)
class AlertSummaryPagination(pagination.PageNumberPagination):
ordering = ('-last_updated', '-id')
page_size = 10
class PerformanceAlertSummaryViewSet(viewsets.ModelViewSet):
"""ViewSet for the performance alert summary model"""
queryset = PerformanceAlertSummary.objects.filter(repository__active_status='active').prefetch_related(
'alerts', 'alerts__series_signature',
'repository',
'alerts__series_signature__platform',
'alerts__series_signature__option_collection',
'alerts__series_signature__option_collection__option')
permission_classes = (IsStaffOrReadOnly,)
serializer_class = PerformanceAlertSummarySerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ['id', 'status', 'framework', 'repository',
'alerts__series_signature__signature_hash']
ordering = ('-last_updated', '-id')
pagination_class = AlertSummaryPagination
def create(self, request, *args, **kwargs):
data = request.data
alert_summary, _ = PerformanceAlertSummary.objects.get_or_create(
repository_id=data['repository_id'],
framework=PerformanceFramework.objects.get(id=data['framework_id']),
push_id=data['push_id'],
prev_push_id=data['prev_push_id'],
defaults={
'manually_created': True,
'last_updated': datetime.datetime.now()
})
return Response({"alert_summary_id": alert_summary.id})
class PerformanceAlertViewSet(viewsets.ModelViewSet):
queryset = PerformanceAlert.objects.all()
permission_classes = (IsStaffOrReadOnly,)
serializer_class = PerformanceAlertSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ['id']
ordering = ('-id')
class AlertPagination(pagination.CursorPagination):
ordering = ('-id')
page_size = 10
pagination_class = AlertPagination
def update(self, request, *args, **kwargs):
request.data['classifier'] = request.user.email
return super(PerformanceAlertViewSet, self).update(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
data = request.data
if 'summary_id' not in data or 'signature_id' not in data:
return Response({"message": "Summary and signature ids necessary "
"to create alert"}, status=HTTP_400_BAD_REQUEST)
summary = PerformanceAlertSummary.objects.get(
id=data['summary_id'])
signature = PerformanceSignature.objects.get(
id=data['signature_id'])
prev_range = signature.max_back_window
if not prev_range:
prev_range = settings.PERFHERDER_ALERTS_MAX_BACK_WINDOW
new_range = signature.fore_window
if not new_range:
new_range = settings.PERFHERDER_ALERTS_FORE_WINDOW
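        # average the last `prev_range` datapoints before the previous push and
        # the first `new_range` datapoints after it; get_alert_properties compares
        # the two means to classify the change as regression/improvement and to
        # compute its magnitude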
prev_data = PerformanceDatum.objects.filter(
signature=signature,
push_timestamp__lte=summary.prev_push.time).order_by(
'-push_timestamp').values_list('value', flat=True)[:prev_range]
new_data = PerformanceDatum.objects.filter(
signature=signature,
push_timestamp__gt=summary.prev_push.time).order_by(
'push_timestamp').values_list('value', flat=True)[:new_range]
if not prev_data or not new_data:
return Response({"message": "Insufficient data to create an "
"alert"}, status=HTTP_400_BAD_REQUEST)
prev_value = sum(prev_data)/len(prev_data)
new_value = sum(new_data)/len(new_data)
alert_properties = get_alert_properties(prev_value, new_value,
signature.lower_is_better)
alert, _ = PerformanceAlert.objects.get_or_create(
summary=summary,
series_signature=signature,
defaults={
'is_regression': alert_properties.is_regression,
'manually_created': True,
'amount_pct': alert_properties.pct_change,
'amount_abs': alert_properties.delta,
'prev_value': prev_value,
'new_value': new_value,
't_value': 1000
})
return Response({"alert_id": alert.id})
class PerformanceBugTemplateViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceBugTemplate.objects.all()
serializer_class = PerformanceBugTemplateSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ['framework']
| mpl-2.0 | -2,020,118,467,896,977,400 | 41.00578 | 107 | 0.597633 | false |
x111ong/django | tests/flatpages_tests/test_middleware.py | 290 | 8134 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
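# The flatpage fallback middleware is listed last on purpose: it only handles
# responses that would otherwise be 404s, so everything else must run first.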
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get('/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware and should add a slash"
response = self.client.get('/flatpage')
self.assertRedirects(response, '/flatpage/', status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here')
self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)
def test_redirect_fallback_flatpage_root(self):
"A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Root</p>")
| bsd-3-clause | 5,911,133,528,806,033,000 | 44.188889 | 114 | 0.671625 | false |
Diiaablo95/friendsNet | test/services_api_test_media_item.py | 1 | 6919 | import unittest
import json
import flask
import friendsNet.resources as resources
import friendsNet.database as database
DB_PATH = 'db/friendsNet_test.db'
ENGINE = database.Engine(DB_PATH)
COLLECTION_JSON = "application/vnd.collection+json"
HAL_JSON = "application/hal+json"
MEDIA_ITEM_PROFILE = "/profiles/media_item-profile"
#Tell Flask that I am running it in testing mode.
resources.app.config['TESTING'] = True
#Necessary for correct translation in url_for
resources.app.config['SERVER_NAME'] = 'localhost:5000'
#Database Engine utilized in our testing
resources.app.config.update({'Engine': ENGINE})
class ResourcesAPITestCase(unittest.TestCase):
#INITIATION AND TEARDOWN METHODS
@classmethod
def setUpClass(cls):
''' Creates the database structure. Removes first any preexisting database file.'''
print "Testing ", cls.__name__
ENGINE.remove_database()
ENGINE.create_tables()
@classmethod
def tearDownClass(cls):
'''Remove the testing database.'''
print "Testing ENDED for ", cls.__name__
ENGINE.remove_database()
def setUp(self):
'''Populates the database.'''
#This method loads the initial values from friendsNet_data_db.sql
ENGINE.populate_tables()
#Activate app_context for using url_for
self.app_context = resources.app.app_context()
self.app_context.push()
#Create a test client
self.client = resources.app.test_client()
def tearDown(self):
'''
Remove all records from database.
'''
ENGINE.clear()
self.app_context.pop()
class MediaItemTestCase(ResourcesAPITestCase):
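    # expected HAL+JSON representation of media item 1, compared verbatim with
    # the body returned by GET /friendsNet/api/media/1/ in test_get_media_item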
resp_get = {
"id" : 1,
"media_item_type" : 0,
"url" : "/friendsNet/media_uploads/media1.jpg",
"description" : "Flowers are wonderful!",
"_links" : {
"self" : {"href" : "/friendsNet/api/media/1/", "profile" : "/profiles/media_item-profile"},
"media list" : {"href" : "/friendsNet/api/media/"}
},
"template" : {
"data" : [
{"name" : "description", "value" : "", "prompt" : "Media item description", "required" : "false"}
]
}
}
media_patch_correct = {
"template" : {
"data" : [
{"name" : "description", "value" : "New description!"}
]
}
}
media_patch_empty = {
"template" : {
"data" : []
}
}
def setUp(self):
super(MediaItemTestCase, self).setUp()
self.url = resources.api.url_for(resources.Media_item, media_id = 1, _external = False)
self.url_wrong = resources.api.url_for(resources.Media_item, media_id = 999, _external = False)
#TEST URL
def test_url(self):
#Checks that the URL points to the right resource
_url = '/friendsNet/api/media/1/'
print '('+self.test_url.__name__+')', self.test_url.__doc__
with resources.app.test_request_context(_url):
rule = flask.request.url_rule
view_point = resources.app.view_functions[rule.endpoint].view_class
self.assertEquals(view_point, resources.Media_item)
def test_wrong_url(self):
        #Checks that GET Media item returns correct status code if given a wrong id
resp = self.client.get(self.url_wrong, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 404)
data = json.loads(resp.data)
href = data["resource_url"] #test HREF
self.assertEquals(href, self.url_wrong)
error = data["code"]
self.assertEquals(error, 404)
#TEST GET
#200 + MIMETYPE & PROFILE
def test_get_media_item(self):
print '('+self.test_get_media_item.__name__+')', self.test_get_media_item.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get, data)
self.assertEqual(resp.headers.get("Content-Type", None), HAL_JSON)
#404
def test_get_not_existing_media_item(self):
print '('+self.test_get_not_existing_media_item.__name__+')', self.test_get_not_existing_media_item.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_wrong, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 404)
#TEST PATCH
#204
def test_patch_media_item(self):
print '('+self.test_patch_media_item.__name__+')', self.test_patch_media_item.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.media_patch_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
resp2 = self.client.get(self.url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp2.status_code, 200)
data = json.loads(resp2.data)
new_value = data["description"]
self.assertEquals(new_value, self.media_patch_correct["template"]["data"][0]["value"])
#PATCH EMPTY
def test_patch_empty_media_item(self):
print '('+self.test_patch_empty_media_item.__name__+')', self.test_patch_empty_media_item.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.media_patch_empty), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
#404
def test_patch_not_existing_media_item(self):
print '('+self.test_patch_not_existing_media_item.__name__+')', self.test_patch_not_existing_media_item.__doc__
resp = self.client.patch(self.url_wrong, data = json.dumps(self.media_patch_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
#415
def test_patch_wrong_header_media_item(self):
print '('+self.test_patch_wrong_header_media_item.__name__+')', self.test_patch_wrong_header_media_item.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.media_patch_correct))
self.assertEquals(resp.status_code, 415)
#TEST DELETE
#204
def test_delete_existing_media_item(self):
print '('+self.test_delete_existing_media_item.__name__+')', self.test_delete_existing_media_item.__doc__
resp = self.client.delete(self.url, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
#404
def test_delete_not_existing_media_item(self):
print '('+self.test_delete_not_existing_media_item.__name__+')', self.test_delete_not_existing_media_item.__doc__
resp = self.client.delete(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
if __name__ == '__main__':
unittest.main()
print 'Start running tests' | gpl-3.0 | 1,968,526,355,504,813,300 | 37.444444 | 139 | 0.623645 | false |
kxz/waapuro | setup.py | 1 | 1353 | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test
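# Custom test command so that `python setup.py test` delegates to tox
# (wired up through the cmdclass argument of setup() below).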
class Tox(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
setup(
name='waapuro',
description='A dead-simple hiragana and katakana romanization library',
version='1.0.1',
author='Kevin Xiwei Zheng',
author_email='[email protected]',
url='https://github.com/kxz/waapuro',
license='X11',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: Japanese',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries'],
keywords='japanese kana hiragana katakana romanization',
packages=find_packages(),
install_requires=[
'future'],
tests_require=[
'tox'],
cmdclass={
'test': Tox})
| mit | -5,876,401,651,510,714,000 | 27.787234 | 75 | 0.616408 | false |
LeandroRoberto/acrobatasdovento | node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| mit | -3,503,384,805,970,845,000 | 34.868512 | 87 | 0.578912 | false |
grow/pygrow | grow/documents/document_fields_test.py | 1 | 1484 | """Tests for the document fields."""
import copy
import unittest
from grow.documents import document_fields
class DocumentFieldsTestCase(unittest.TestCase):
def testContains(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
}, None)
self.assertEquals(True, 'foo' in doc_fields)
self.assertEquals(False, 'bar' in doc_fields)
def testGet(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
}, None)
self.assertEquals('bar', doc_fields['foo'])
self.assertEquals('baz', doc_fields.get('bar', 'baz'))
def testGetItem(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
}, None)
self.assertEquals('bar', doc_fields['foo'])
with self.assertRaises(KeyError):
doc_fields['bar']
def testLen(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
'bar': 'baz',
}, None)
self.assertEquals(2, len(doc_fields))
def test_update(self):
"""Test that updates properly overwrite and are untagged."""
doc_fields = document_fields.DocumentFields({
'foo@': 'bar',
})
self.assertEquals('bar', doc_fields['foo'])
doc_fields.update({
'foo@': 'bbq',
})
self.assertEquals('bbq', doc_fields['foo'])
if __name__ == '__main__':
unittest.main()
| mit | -8,249,390,655,376,449,000 | 25.035088 | 68 | 0.563342 | false |
kennethgillen/ansible | lib/ansible/plugins/cache/pickle.py | 27 | 1645 | # (c) 2017, Brian Coca
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
    cache: pickle
short_description: File backed, using Python's pickle.
description:
        - File backed cache that uses Python's pickle serialization as a format; the files are per host.
version_added: "2.3"
author: Brian Coca (@bcoca)
'''
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import cPickle as pickle
except ImportError:
import pickle
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by pickle files.
"""
def _load(self, filepath):
# Pickle is a binary format
with open(filepath, 'rb') as f:
return pickle.load(f)
def _dump(self, value, filepath):
with open(filepath, 'wb') as f:
# Use pickle protocol 2 which is compatible with Python 2.3+.
pickle.dump(value, f, protocol=2)
| gpl-3.0 | 4,702,692,848,298,006,000 | 31.254902 | 104 | 0.699088 | false |
imply/chuu | third_party/closure_linter/closure_linter/statetracker.py | 135 | 31214 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import re
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class DocFlag(object):
"""Generic doc flag object.
Attributes:
flag_type: param, return, define, type, etc.
flag_token: The flag token.
type_start_token: The first token specifying the flag type,
including braces.
type_end_token: The last token specifying the flag type,
including braces.
type: The type spec.
name_token: The token specifying the flag name.
name: The flag name
description_start_token: The first token in the description.
description_end_token: The end token in the description.
description: The description.
"""
# Please keep these lists alphabetized.
# The list of standard jsdoc tags is from
STANDARD_DOC = frozenset([
'author',
'bug',
'const',
'constructor',
'define',
'deprecated',
'enum',
'export',
'extends',
'externs',
'fileoverview',
'implements',
'implicitCast',
'interface',
'lends',
'license',
'noalias',
'nocompile',
'nosideeffects',
'override',
'owner',
'param',
'preserve',
'private',
'return',
'see',
'supported',
'template',
'this',
'type',
'typedef',
])
ANNOTATION = frozenset(['preserveTry', 'suppress'])
LEGAL_DOC = STANDARD_DOC | ANNOTATION
# Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter.
#
# Specific cases:
# - accessControls is supported by the compiler at the expression
# and method level to suppress warnings about private/protected
# access (method level applies to all references in the method).
# The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([
'accessControls',
'ambiguousFunctionDecl',
'checkRegExp',
'checkTypes',
'checkVars',
'const',
'constantProperty',
'deprecated',
'duplicate',
'es5Strict',
'externsValidation',
'extraProvide',
'extraRequire',
'fileoverviewTags',
'globalThis',
'internetExplorerChecks',
'invalidCasts',
'missingProperties',
'missingProvide',
'missingRequire',
'nonStandardJsDocs',
'strictModuleDepCheck',
'tweakValidation',
'typeInvalidation',
'undefinedNames',
'undefinedVars',
'underscore',
'unknownDefines',
'uselessCode',
'visibility',
'with'])
HAS_DESCRIPTION = frozenset([
'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
'preserve', 'return', 'supported'])
HAS_TYPE = frozenset([
'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
'suppress'])
TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type'])
HAS_NAME = frozenset(['param'])
EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
EMPTY_STRING = re.compile(r'^\s*$')
def __init__(self, flag_token):
"""Creates the DocFlag object and attaches it to the given start token.
Args:
flag_token: The starting token of the flag.
"""
self.flag_token = flag_token
self.flag_type = flag_token.string.strip().lstrip('@')
# Extract type, if applicable.
self.type = None
self.type_start_token = None
self.type_end_token = None
if self.flag_type in self.HAS_TYPE:
brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
Type.FLAG_ENDING_TYPES)
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
self.type = contents
self.type_start_token = brace
self.type_end_token = end_token
elif (self.flag_type in self.TYPE_ONLY and
flag_token.next.type not in Type.FLAG_ENDING_TYPES):
self.type_start_token = flag_token.next
self.type_end_token, self.type = _GetEndTokenAndContents(
self.type_start_token)
if self.type is not None:
self.type = self.type.strip()
# Extract name, if applicable.
self.name_token = None
self.name = None
if self.flag_type in self.HAS_NAME:
# Handle bad case, name could be immediately after flag token.
self.name_token = _GetNextIdentifierToken(flag_token)
# Handle good case, if found token is after type start, look for
# identifier after type end, since types contain identifiers.
if (self.type and self.name_token and
tokenutil.Compare(self.name_token, self.type_start_token) > 0):
self.name_token = _GetNextIdentifierToken(self.type_end_token)
if self.name_token:
self.name = self.name_token.string
# Extract description, if applicable.
self.description_start_token = None
self.description_end_token = None
self.description = None
if self.flag_type in self.HAS_DESCRIPTION:
search_start_token = flag_token
if self.name_token and self.type_end_token:
if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
search_start_token = self.type_end_token
else:
search_start_token = self.name_token
elif self.name_token:
search_start_token = self.name_token
elif self.type:
search_start_token = self.type_end_token
interesting_token = tokenutil.Search(search_start_token,
Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
self.description_start_token = interesting_token
self.description_end_token, self.description = (
_GetEndTokenAndContents(interesting_token))
class DocComment(object):
"""JavaScript doc comment object.
Attributes:
ordered_params: Ordered list of parameters documented.
start_token: The token that starts the doc comment.
end_token: The token that ends the doc comment.
suppressions: Map of suppression type to the token that added it.
"""
def __init__(self, start_token):
"""Create the doc comment object.
Args:
start_token: The first token in the doc comment.
"""
self.__params = {}
self.ordered_params = []
self.__flags = {}
self.start_token = start_token
self.end_token = None
self.suppressions = {}
self.invalidated = False
def Invalidate(self):
"""Indicate that the JSDoc is well-formed but we had problems parsing it.
This is a short-circuiting mechanism so that we don't emit false
positives about well-formed doc comments just because we don't support
hot new syntaxes.
"""
self.invalidated = True
def IsInvalidated(self):
"""Test whether Invalidate() has been called."""
return self.invalidated
def AddParam(self, name, param_type):
"""Add a new documented parameter.
Args:
name: The name of the parameter to document.
param_type: The parameter's declared JavaScript type.
"""
self.ordered_params.append(name)
self.__params[name] = param_type
def AddSuppression(self, token):
"""Add a new error suppression flag.
Args:
token: The suppression flag token.
"""
#TODO(user): Error if no braces
brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
[Type.DOC_FLAG])
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
for suppression in contents.split('|'):
self.suppressions[suppression] = token
def SuppressionOnly(self):
"""Returns whether this comment contains only suppression flags."""
for flag_type in self.__flags.keys():
if flag_type != 'suppress':
return False
return True
def AddFlag(self, flag):
"""Add a new document flag.
Args:
flag: DocFlag object.
"""
self.__flags[flag.flag_type] = flag
def InheritsDocumentation(self):
"""Test if the jsdoc implies documentation inheritance.
Returns:
True if documentation may be pulled off the superclass.
"""
return self.HasFlag('inheritDoc') or self.HasFlag('override')
def HasFlag(self, flag_type):
"""Test if the given flag has been set.
Args:
flag_type: The type of the flag to check.
Returns:
True if the flag is set.
"""
return flag_type in self.__flags
def GetFlag(self, flag_type):
"""Gets the last flag of the given type.
Args:
flag_type: The type of the flag to get.
Returns:
The last instance of the given flag type in this doc comment.
"""
return self.__flags[flag_type]
def CompareParameters(self, params):
"""Computes the edit distance and list from the function params to the docs.
Uses the Levenshtein edit distance algorithm, with code modified from
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
Args:
params: The parameter list for the function declaration.
Returns:
The edit distance, the edit list.
"""
source_len, target_len = len(self.ordered_params), len(params)
edit_lists = [[]]
distance = [[]]
for i in range(target_len+1):
edit_lists[0].append(['I'] * i)
distance[0].append(i)
for j in range(1, source_len+1):
edit_lists.append([['D'] * j])
distance.append([j])
for i in range(source_len):
for j in range(target_len):
cost = 1
if self.ordered_params[i] == params[j]:
cost = 0
deletion = distance[i][j+1] + 1
insertion = distance[i+1][j] + 1
substitution = distance[i][j] + cost
edit_list = None
best = None
if deletion <= insertion and deletion <= substitution:
# Deletion is best.
best = deletion
edit_list = list(edit_lists[i][j+1])
edit_list.append('D')
elif insertion <= substitution:
# Insertion is best.
best = insertion
edit_list = list(edit_lists[i+1][j])
edit_list.append('I')
edit_lists[i+1].append(edit_list)
else:
# Substitution is best.
best = substitution
edit_list = list(edit_lists[i][j])
if cost:
edit_list.append('S')
else:
edit_list.append('=')
edit_lists[i+1].append(edit_list)
distance[i+1].append(best)
return distance[source_len][target_len], edit_lists[source_len][target_len]
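# Illustrative example (assumption, not in the original source): with
# documented params ['a', 'b'] and declared params ['a', 'c'],
# CompareParameters(['a', 'c']) returns (1, ['=', 'S']) -- one substitution
# is needed to make the docs match the declaration.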
def __repr__(self):
"""Returns a string representation of this object.
Returns:
A string representation of this object.
"""
return '<DocComment: %s, %s>' % (str(self.__params), str(self.__flags))
#
# Helper methods used by DocFlag and DocComment to parse out flag information.
#
def _GetMatchingEndBraceAndContents(start_brace):
"""Returns the matching end brace and contents between the two braces.
If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
that token is used as the matching ending token. Contents will have all
comment prefixes stripped out of them, and all comment prefixes in between the
start and end tokens will be split out into separate DOC_PREFIX tokens.
Args:
start_brace: The DOC_START_BRACE token immediately before desired contents.
Returns:
The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
of the contents between the matching tokens, minus any comment prefixes.
"""
open_count = 1
close_count = 0
contents = []
# We don't consider the start brace part of the type string.
token = start_brace.next
while open_count != close_count:
if token.type == Type.DOC_START_BRACE:
open_count += 1
elif token.type == Type.DOC_END_BRACE:
close_count += 1
if token.type != Type.DOC_PREFIX:
contents.append(token.string)
if token.type in Type.FLAG_ENDING_TYPES:
break
token = token.next
#Don't include the end token (end brace, end doc comment, etc.) in type.
token = token.previous
contents = contents[:-1]
return token, ''.join(contents)
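# Illustrative example (assumption): for a doc type annotation like {number},
# the function above returns the closing brace token and the string 'number'.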
def _GetNextIdentifierToken(start_token):
"""Searches for and returns the first identifier at the beginning of a token.
Searches each token after the start to see if it starts with an identifier.
If found, will split the token into at most 3 pieces: leading whitespace,
identifier, rest of token, returning the identifier token. If no identifier is
found returns None and changes no tokens. Search is abandoned when a
FLAG_ENDING_TYPE token is found.
Args:
start_token: The token to start searching after.
Returns:
The identifier token if found, None otherwise.
"""
token = start_token.next
while token and not token.type in Type.FLAG_ENDING_TYPES:
match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match(
token.string)
if (match is not None and token.type == Type.COMMENT and
len(token.string) == len(match.group(0))):
return token
token = token.next
return None
def _GetEndTokenAndContents(start_token):
"""Returns last content token and all contents before FLAG_ENDING_TYPE token.
Comment prefixes are split into DOC_PREFIX tokens and stripped from the
returned contents.
Args:
start_token: The token immediately before the first content token.
Returns:
The last content token and a string of all contents including start and
end tokens, with comment prefixes stripped.
"""
iterator = start_token
last_line = iterator.line_number
last_token = None
contents = ''
doc_depth = 0
while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit
# ending of the description. This handles a case like:
#
# * @return {boolean} True
# *
# * Note: This is a sentence.
#
# The note is not part of the @return description, but there was
# no definitive ending token. Rather there was a line containing
# only a doc comment prefix or whitespace.
break
# b/2983692
# don't prematurely match against a @flag if inside a doc flag
# need to think about what is the correct behavior for unterminated
# inline doc flags
if (iterator.type == Type.DOC_START_BRACE and
iterator.next.type == Type.DOC_INLINE_FLAG):
doc_depth += 1
elif (iterator.type == Type.DOC_END_BRACE and
doc_depth > 0):
doc_depth -= 1
if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string
last_token = iterator
iterator = iterator.next
if iterator.line_number != last_line:
contents += '\n'
last_line = iterator.line_number
end_token = last_token
if DocFlag.EMPTY_STRING.match(contents):
contents = None
else:
# Strip trailing newline.
contents = contents[:-1]
return end_token, contents
class Function(object):
"""Data about a JavaScript function.
Attributes:
block_depth: Block depth the function began at.
doc: The DocComment associated with the function.
has_return: If the function has a return value.
has_this: If the function references the 'this' object.
is_assigned: If the function is part of an assignment.
is_constructor: If the function is a constructor.
name: The name of the function, whether given in the function keyword or
as the lvalue the function is assigned to.
"""
def __init__(self, block_depth, is_assigned, doc, name):
self.block_depth = block_depth
self.is_assigned = is_assigned
self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False
self.has_throw = False
self.has_this = False
self.name = name
self.doc = doc
class StateTracker(object):
"""EcmaScript state tracker.
Tracks block depth, function names, etc. within an EcmaScript token stream.
"""
OBJECT_LITERAL = 'o'
CODE = 'c'
def __init__(self, doc_flag=DocFlag):
"""Initializes a JavaScript token stream state tracker.
Args:
doc_flag: An optional custom DocFlag used for validating
documentation flags.
"""
self._doc_flag = doc_flag
self.Reset()
def Reset(self):
"""Resets the state tracker to prepare for processing a new page."""
self._block_depth = 0
self._is_block_close = False
self._paren_depth = 0
self._functions = []
self._functions_by_name = {}
self._last_comment = None
self._doc_comment = None
self._cumulative_params = None
self._block_types = []
self._last_non_space_token = None
self._last_line = None
self._first_token = None
self._documented_identifiers = set()
def InFunction(self):
"""Returns true if the current token is within a function.
Returns:
True if the current token is within a function.
"""
return bool(self._functions)
def InConstructor(self):
"""Returns true if the current token is within a constructor.
Returns:
True if the current token is within a constructor.
"""
return self.InFunction() and self._functions[-1].is_constructor
def InInterfaceMethod(self):
"""Returns true if the current token is within an interface method.
Returns:
True if the current token is within an interface method.
"""
if self.InFunction():
if self._functions[-1].is_interface:
return True
else:
name = self._functions[-1].name
prototype_index = name.find('.prototype.')
if prototype_index != -1:
class_function_name = name[0:prototype_index]
if (class_function_name in self._functions_by_name and
self._functions_by_name[class_function_name].is_interface):
return True
return False
def InTopLevelFunction(self):
"""Returns true if the current token is within a top level function.
Returns:
True if the current token is within a top level function.
"""
return len(self._functions) == 1 and self.InTopLevel()
def InAssignedFunction(self):
"""Returns true if the current token is within a function variable.
Returns:
True if the current token is within a function variable.
"""
return self.InFunction() and self._functions[-1].is_assigned
def IsFunctionOpen(self):
"""Returns true if the current token is a function block open.
Returns:
True if the current token is a function block open.
"""
return (self._functions and
self._functions[-1].block_depth == self._block_depth - 1)
def IsFunctionClose(self):
"""Returns true if the current token is a function block close.
Returns:
True if the current token is a function block close.
"""
return (self._functions and
self._functions[-1].block_depth == self._block_depth)
def InBlock(self):
"""Returns true if the current token is within a block.
Returns:
True if the current token is within a block.
"""
return bool(self._block_depth)
def IsBlockClose(self):
"""Returns true if the current token is a block close.
Returns:
True if the current token is a block close.
"""
return self._is_block_close
def InObjectLiteral(self):
"""Returns true if the current token is within an object literal.
Returns:
True if the current token is within an object literal.
"""
return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
def InObjectLiteralDescendant(self):
"""Returns true if the current token has an object literal ancestor.
Returns:
True if the current token has an object literal ancestor.
"""
return self.OBJECT_LITERAL in self._block_types
def InParentheses(self):
"""Returns true if the current token is within parentheses.
Returns:
True if the current token is within parentheses.
"""
return bool(self._paren_depth)
def InTopLevel(self):
"""Whether we are at the top level in the class.
This function call is language specific. In some languages like
JavaScript, a function is top level if it is not inside any parenthesis.
In languages such as ActionScript, a function is top level if it is directly
within a class.
"""
raise TypeError('Abstract method InTopLevel not implemented')
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
Code blocks come after parameters, keywords like else, and closing parens.
Args:
token: The current token. Can be assumed to be type START_BLOCK.
Returns:
Code block type for current token.
"""
raise TypeError('Abstract method GetBlockType not implemented')
def GetParams(self):
"""Returns the accumulated input params as an array.
In some EcmaScript languages, input params are specified like
(param:Type, param2:Type2, ...)
in other they are specified just as
(param, param2)
We handle both formats for specifying parameters here and leave
it to the compilers for each language to detect compile errors.
This allows more code to be reused between lint checkers for various
EcmaScript languages.
Returns:
The accumulated input params as an array.
"""
params = []
if self._cumulative_params:
params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
# Strip out the type from parameters of the form name:Type.
params = map(lambda param: param.split(':')[0], params)
return params
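# Illustrative example (assumption): if the accumulated parameter text is
# 'a:Number, b:String', GetParams() returns ['a', 'b'] -- whitespace is
# stripped and any ':Type' suffix is dropped.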
def GetLastComment(self):
"""Return the last plain comment that could be used as documentation.
Returns:
The last plain comment that could be used as documentation.
"""
return self._last_comment
def GetDocComment(self):
"""Return the most recent applicable documentation comment.
Returns:
The last applicable documentation comment.
"""
return self._doc_comment
def HasDocComment(self, identifier):
"""Returns whether the identifier has been documented yet.
Args:
identifier: The identifier.
Returns:
Whether the identifier has been documented yet.
"""
return identifier in self._documented_identifiers
def InDocComment(self):
"""Returns whether the current token is in a doc comment.
Returns:
Whether the current token is in a doc comment.
"""
return self._doc_comment and self._doc_comment.end_token is None
def GetDocFlag(self):
"""Returns the current documentation flags.
Returns:
The current documentation flags.
"""
return self._doc_flag
def IsTypeToken(self, t):
if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
None, True)
if f and f.attached_object.type_start_token is not None:
return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
return False
def GetFunction(self):
"""Return the function the current code block is a part of.
Returns:
The current Function object.
"""
if self._functions:
return self._functions[-1]
def GetBlockDepth(self):
"""Return the block depth.
Returns:
The current block depth.
"""
return self._block_depth
def GetLastNonSpaceToken(self):
"""Return the last non whitespace token."""
return self._last_non_space_token
def GetLastLine(self):
"""Return the last line."""
return self._last_line
def GetFirstToken(self):
"""Return the very first token in the file."""
return self._first_token
def HandleToken(self, token, last_non_space_token):
"""Handles the given token and updates state.
Args:
token: The token to handle.
last_non_space_token:
"""
self._is_block_close = False
if not self._first_token:
self._first_token = token
# Track block depth.
type = token.type
if type == Type.START_BLOCK:
self._block_depth += 1
# Subclasses need to handle block start very differently because
# whether a block is a CODE or OBJECT_LITERAL block varies significantly
# by language.
self._block_types.append(self.GetBlockType(token))
# Track block depth.
elif type == Type.END_BLOCK:
self._is_block_close = not self.InObjectLiteral()
self._block_depth -= 1
self._block_types.pop()
# Track parentheses depth.
elif type == Type.START_PAREN:
self._paren_depth += 1
# Track parentheses depth.
elif type == Type.END_PAREN:
self._paren_depth -= 1
elif type == Type.COMMENT:
self._last_comment = token.string
elif type == Type.START_DOC_COMMENT:
self._last_comment = None
self._doc_comment = DocComment(token)
elif type == Type.END_DOC_COMMENT:
self._doc_comment.end_token = token
elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
flag = self._doc_flag(token)
token.attached_object = flag
self._doc_comment.AddFlag(flag)
if flag.flag_type == 'param' and flag.name:
self._doc_comment.AddParam(flag.name, flag.type)
elif flag.flag_type == 'suppress':
self._doc_comment.AddSuppression(token)
elif type == Type.FUNCTION_DECLARATION:
last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
True)
doc = None
# Only functions outside of parens are eligible for documentation.
if not self._paren_depth:
doc = self._doc_comment
name = ''
is_assigned = last_code and (last_code.IsOperator('=') or
last_code.IsOperator('||') or last_code.IsOperator('&&') or
(last_code.IsOperator(':') and not self.InObjectLiteral()))
if is_assigned:
# TODO(robbyw): This breaks for x[2] = ...
# Must use loop to find full function name in the case of line-wrapped
# declarations (bug 1220601) like:
# my.function.foo.
# bar = function() ...
identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
while identifier and identifier.type in (
Type.IDENTIFIER, Type.SIMPLE_LVALUE):
name = identifier.string + name
# Traverse behind us, skipping whitespace and comments.
while True:
identifier = identifier.previous
if not identifier or not identifier.type in Type.NON_CODE_TYPES:
break
else:
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
while next_token and next_token.IsType(Type.FUNCTION_NAME):
name += next_token.string
next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
function = Function(self._block_depth, is_assigned, doc, name)
self._functions.append(function)
self._functions_by_name[name] = function
elif type == Type.START_PARAMETERS:
self._cumulative_params = ''
elif type == Type.PARAMETERS:
self._cumulative_params += token.string
elif type == Type.KEYWORD and token.string == 'return':
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if not next_token.IsType(Type.SEMICOLON):
function = self.GetFunction()
if function:
function.has_return = True
elif type == Type.KEYWORD and token.string == 'throw':
function = self.GetFunction()
if function:
function.has_throw = True
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
jsdoc = self.GetDocComment()
if jsdoc:
self._documented_identifiers.add(identifier)
self._HandleIdentifier(identifier, True)
elif type == Type.IDENTIFIER:
self._HandleIdentifier(token.string, False)
# Detect documented non-assignments.
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if next_token.IsType(Type.SEMICOLON):
if (self._last_non_space_token and
self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
self._documented_identifiers.add(token.string)
def _HandleIdentifier(self, identifier, is_assignment):
"""Process the given identifier.
Currently checks if it references 'this' and annotates the function
accordingly.
Args:
identifier: The identifier to process.
is_assignment: Whether the identifier is being written to.
"""
if identifier == 'this' or identifier.startswith('this.'):
function = self.GetFunction()
if function:
function.has_this = True
def HandleAfterToken(self, token):
"""Handle updating state after a token has been checked.
This function should be used for destructive state changes such as
deleting a tracked object.
Args:
token: The token to handle.
"""
type = token.type
if type == Type.SEMICOLON or type == Type.END_PAREN or (
type == Type.END_BRACKET and
self._last_non_space_token.type not in (
Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
# We end on any numeric array index, but keep going for string based
# array indices so that we pick up manually exported identifiers.
self._doc_comment = None
self._last_comment = None
elif type == Type.END_BLOCK:
self._doc_comment = None
self._last_comment = None
if self.InFunction() and self.IsFunctionClose():
# TODO(robbyw): Detect the function's name for better errors.
self._functions.pop()
elif type == Type.END_PARAMETERS and self._doc_comment:
self._doc_comment = None
self._last_comment = None
if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
self._last_non_space_token = token
self._last_line = token.line
| bsd-3-clause | 3,662,040,502,235,770,400 | 29.96627 | 93 | 0.653905 | false |
sileht/gnocchi | gnocchi/tests/functional/fixtures.py | 1 | 9173 | #
# Copyright 2015-2017 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for use with gabbi tests."""
from __future__ import absolute_import
import logging
import os
import shutil
import subprocess
import tempfile
import threading
import time
from unittest import case
import uuid
import warnings
import fixtures
from gabbi import fixture
import numpy
from oslo_config import cfg
from oslo_middleware import cors
import sqlalchemy_utils
import yaml
from gnocchi import chef
from gnocchi.cli import metricd
from gnocchi import incoming
from gnocchi import indexer
from gnocchi.indexer import sqlalchemy
from gnocchi.rest import app
from gnocchi import service
from gnocchi import storage
from gnocchi.tests import base
from gnocchi.tests import utils
# NOTE(chdent): Hack to restore semblance of global configuration to
# pass to the WSGI app used per test suite. LOAD_APP_KWARGS are the oslo
# configuration, and the pecan application configuration of
# which the critical part is a reference to the current indexer.
LOAD_APP_KWARGS = None
def setup_app():
global LOAD_APP_KWARGS
return app.load_app(**LOAD_APP_KWARGS)
class AssertNAN(yaml.YAMLObject):
def __eq__(self, other):
try:
return numpy.isnan(other)
except TypeError:
return False
yaml.add_constructor(u'!AssertNAN', lambda loader, node: AssertNAN())
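# Illustrative usage (assumption, not from this file): a gabbi YAML test can
# assert a NaN value with the custom tag, e.g.
#   response_json_paths:
#     $.measures[0][2]: !AssertNAN
# The JSONPath above is only an example.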
class ConfigFixture(fixture.GabbiFixture):
"""Establish the relevant configuration fixture, per test file.
Each test file gets its own oslo config and its own indexer and storage
instance. The indexer is based on the current database url. The storage
uses a temporary directory.
To use this fixture in a gabbit add::
fixtures:
- ConfigFixture
"""
def __init__(self):
self.conf = None
self.tmp_dir = None
def start_fixture(self):
"""Create necessary temp files and do the config dance."""
global LOAD_APP_KWARGS
if not os.getenv("GNOCCHI_TEST_DEBUG"):
self.output = base.CaptureOutput()
self.output.setUp()
data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')
if os.getenv("GABBI_LIVE"):
dcf = None
else:
dcf = []
conf = service.prepare_service([], conf=utils.prepare_conf(),
default_config_files=dcf,
logging_level=logging.DEBUG,
skip_log_opts=True)
py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..',))
conf.set_override('paste_config',
os.path.join(py_root, 'rest', 'api-paste.ini'),
group="api")
conf.set_override('policy_file',
os.path.join(py_root, 'rest', 'policy.json'),
group="oslo_policy")
# NOTE(sileht): This is not concurrency safe, but only this test file
# deals with cors, so we are fine. set_override doesn't work because the cors
# group doesn't exist yet, and when the CORS middleware is created it
# registers the option and directly copies the values of all configuration
# options, making it impossible to override them properly...
cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")
self.conf = conf
self.tmp_dir = data_tmp_dir
if conf.indexer.url is None:
raise case.SkipTest("No indexer configured")
storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
conf.set_override('driver', storage_driver, 'storage')
if conf.storage.driver == 'file':
conf.set_override('file_basepath', data_tmp_dir, 'storage')
elif conf.storage.driver == 'ceph':
conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
'storage')
pool_name = uuid.uuid4().hex
with open(os.devnull, 'w') as f:
subprocess.call("rados -c %s mkpool %s" % (
os.getenv("CEPH_CONF"), pool_name), shell=True,
stdout=f, stderr=subprocess.STDOUT)
conf.set_override('ceph_pool', pool_name, 'storage')
elif conf.storage.driver == "s3":
conf.set_override('s3_endpoint_url',
os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
group="storage")
conf.set_override('s3_access_key_id', "gnocchi", group="storage")
conf.set_override('s3_secret_access_key', "anythingworks",
group="storage")
conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
"storage")
elif conf.storage.driver == "swift":
# NOTE(sileht): This fixture must start before any driver stuff
swift_fixture = fixtures.MockPatch(
'swiftclient.client.Connection',
base.FakeSwiftClient)
swift_fixture.setUp()
# NOTE(jd) All of that is still very SQL centric but we only support
# SQL for now so let's say it's good enough.
conf.set_override(
'url',
sqlalchemy.SQLAlchemyIndexer._create_new_database(
conf.indexer.url),
'indexer')
index = indexer.get_driver(conf)
index.upgrade()
# Set pagination to a testable value
conf.set_override('max_limit', 7, 'api')
conf.set_override('enable_proxy_headers_parsing', True, group="api")
self.index = index
self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
conf.coordination_url)
s = storage.get_driver(conf)
i = incoming.get_driver(conf)
if conf.storage.driver == 'redis':
# Create one prefix per test
s.STORAGE_PREFIX = str(uuid.uuid4()).encode()
if conf.incoming.driver == 'redis':
i.SACK_NAME_FORMAT = (
str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
)
self.fixtures = [
fixtures.MockPatch("gnocchi.storage.get_driver",
return_value=s),
fixtures.MockPatch("gnocchi.incoming.get_driver",
return_value=i),
fixtures.MockPatch("gnocchi.indexer.get_driver",
return_value=self.index),
fixtures.MockPatch(
"gnocchi.cli.metricd.get_coordinator_and_start",
return_value=self.coord),
]
for f in self.fixtures:
f.setUp()
if conf.storage.driver == 'swift':
self.fixtures.append(swift_fixture)
LOAD_APP_KWARGS = {
'conf': conf,
}
s.upgrade()
i.upgrade(128)
# start up a thread to async process measures
self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
self.metricd_thread.start()
def stop_fixture(self):
"""Clean up the config fixture and storage artifacts."""
if hasattr(self, 'metricd_thread'):
self.metricd_thread.stop()
self.metricd_thread.join()
if hasattr(self, 'fixtures'):
for f in reversed(self.fixtures):
f.cleanUp()
if hasattr(self, 'index'):
self.index.disconnect()
# Swallow noise from missing tables when dropping
# database.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
module='sqlalchemy.engine.default')
sqlalchemy_utils.drop_database(self.conf.indexer.url)
if self.tmp_dir:
shutil.rmtree(self.tmp_dir)
if hasattr(self, 'coord'):
self.coord.stop()
self.conf.reset()
if not os.getenv("GNOCCHI_TEST_DEBUG"):
self.output.cleanUp()
class MetricdThread(threading.Thread):
"""Run metricd in a naive thread to process measures."""
def __init__(self, chef, name='metricd'):
super(MetricdThread, self).__init__(name=name)
self.chef = chef
self.flag = True
def run(self):
while self.flag:
for sack in self.chef.incoming.iter_sacks():
self.chef.process_new_measures_for_sack(sack, blocking=True)
time.sleep(0.1)
def stop(self):
self.flag = False
| apache-2.0 | -4,736,587,045,291,557,000 | 33.355805 | 79 | 0.589774 | false |
meabsence/python-for-android | python3-alpha/python3-src/Lib/encodings/iso8859_3.py | 272 | 13089 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
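# Illustrative round-trip (assumption, not part of the generated codec): with
# this table, 'Ĝ' (U+011C) encodes to b'\xd8' and b'\xd8' decodes back to 'Ĝ',
# e.g. 'Ĝis'.encode('iso8859-3') == b'\xd8is'.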
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
'\u02d8' # 0xA2 -> BREVE
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe'
'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\ufffe'
'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\ufffe'
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\ufffe'
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\ufffe'
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\ufffe'
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 | 9,222,581,354,518,542,000 | 41.635179 | 107 | 0.522653 | false |
asm666/sympy | sympy/utilities/pytest.py | 78 | 4728 | """py.test hacks to support XFAIL/XPASS"""
from __future__ import print_function, division
import sys
import functools
import os
from sympy.core.compatibility import get_function_name
try:
import py
from py.test import skip, raises
USE_PYTEST = getattr(sys, '_running_pytest', False)
except ImportError:
USE_PYTEST = False
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
if not USE_PYTEST:
def raises(expectedException, code=None):
"""
Tests that ``code`` raises the exception ``expectedException``.
``code`` may be a callable, such as a lambda expression or function
name.
If ``code`` is not given or None, ``raises`` will return a context
manager for use in ``with`` statements; the code to execute then
comes from the scope of the ``with``.
``raises()`` does nothing if the callable raises the expected exception,
otherwise it raises an AssertionError.
Examples
========
>>> from sympy.utilities.pytest import raises
>>> raises(ZeroDivisionError, lambda: 1/0)
>>> raises(ZeroDivisionError, lambda: 1/2)
Traceback (most recent call last):
...
AssertionError: DID NOT RAISE
>>> with raises(ZeroDivisionError):
... n = 1/0
>>> with raises(ZeroDivisionError):
... n = 1/2
Traceback (most recent call last):
...
AssertionError: DID NOT RAISE
Note that you cannot test multiple statements via
``with raises``:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise, aborting the ``with``
... n = 9999/0 # never executed
This is just what ``with`` is supposed to do: abort the
contained statement sequence at the first exception and let
the context manager deal with the exception.
To test multiple statements, you'll need a separate ``with``
for each:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise
>>> with raises(ZeroDivisionError):
... n = 9999/0 # will also execute and raise
"""
if code is None:
return RaisesContext(expectedException)
elif callable(code):
try:
code()
except expectedException:
return
raise AssertionError("DID NOT RAISE")
elif isinstance(code, str):
raise TypeError(
'\'raises(xxx, "code")\' has been phased out; '
'change \'raises(xxx, "expression")\' '
'to \'raises(xxx, lambda: expression)\', '
'\'raises(xxx, "statement")\' '
'to \'with raises(xxx): statement\'')
else:
raise TypeError(
'raises() expects a callable for the 2nd argument.')
class RaisesContext(object):
def __init__(self, expectedException):
self.expectedException = expectedException
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
raise AssertionError("DID NOT RAISE")
return issubclass(exc_type, self.expectedException)
class XFail(Exception):
pass
class XPass(Exception):
pass
class Skipped(Exception):
pass
def XFAIL(func):
def wrapper():
try:
func()
except Exception as e:
message = str(e)
if message != "Timeout":
raise XFail(get_function_name(func))
else:
raise Skipped("Timeout")
raise XPass(get_function_name(func))
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def skip(str):
raise Skipped(str)
def SKIP(reason):
"""Similar to :func:`skip`, but this is a decorator. """
def wrapper(func):
def func_wrapper():
raise Skipped(reason)
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
return wrapper
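# Illustrative usage (assumption): decorating a test so it is skipped with a
# reason, e.g.
#   @SKIP("requires matplotlib")
#   def test_plotting():
#       ...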
def slow(func):
func._slow = True
def func_wrapper():
func()
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
else:
XFAIL = py.test.mark.xfail
slow = py.test.mark.slow
def SKIP(reason):
def skipping(func):
@functools.wraps(func)
def inner(*args, **kwargs):
skip(reason)
return inner
return skipping
| bsd-3-clause | 6,991,592,825,264,225,000 | 28.006135 | 80 | 0.559433 | false |
ViennaChen/mysql-connector-python | setupinfo.py | 7 | 4296 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from distutils.core import Extension
import os
import sys
from lib.cpy_distutils import (
Install, InstallLib, BuildExtDynamic, BuildExtStatic
)
# Development Status Trove Classifiers significant for Connector/Python
DEVELOPMENT_STATUSES = {
'a': '3 - Alpha',
'b': '4 - Beta',
'rc': '4 - Beta', # There is no Classifier for Release Candidates
'': '5 - Production/Stable'
}
if not (((2, 6) <= sys.version_info < (3, 0)) or sys.version_info >= (3, 3)):
raise RuntimeError("Python v{major}.{minor} is not supported".format(
major=sys.version_info[0], minor=sys.version_info[1]
))
# Load version information
VERSION = [999, 0, 0, 'a', 0] # Set correct after version.py is loaded
version_py = os.path.join('lib', 'mysql', 'connector', 'version.py')
with open(version_py, 'rb') as fp:
exec(compile(fp.read(), version_py, 'exec'))
BuildExtDynamic.min_connector_c_version = (5, 5, 8)
command_classes = {
'build_ext': BuildExtDynamic,
'build_ext_static': BuildExtStatic,
'install_lib': InstallLib,
'install': Install,
}
package_dir = {'': 'lib'}
name = 'mysql-connector-python'
version = '{0}.{1}.{2}'.format(*VERSION[0:3])
extensions = [
Extension("_mysql_connector",
sources=[
"src/exceptions.c",
"src/mysql_capi.c",
"src/mysql_capi_conversion.c",
"src/mysql_connector.c",
"src/force_cpp_linkage.cc",
],
include_dirs=['src/include'],
)
]
packages = [
'mysql',
'mysql.connector',
'mysql.connector.locales',
'mysql.connector.locales.eng',
'mysql.connector.django',
'mysql.connector.fabric',
]
description = "MySQL driver written in Python"
long_description = """
MySQL driver written in Python which does not depend on MySQL C client
libraries and implements the DB API v2.0 specification (PEP-249).
"""
author = 'Oracle and/or its affiliates'
author_email = ''
maintainer = 'Geert Vanderkelen'
maintainer_email = '[email protected]'
cpy_gpl_license = "GNU GPLv2 (with FOSS License Exception)"
keywords = "mysql db",
url = 'http://dev.mysql.com/doc/connector-python/en/index.html'
download_url = 'http://dev.mysql.com/downloads/connector/python/'
classifiers = [
'Development Status :: %s' % (DEVELOPMENT_STATUSES[VERSION[3]]),
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Database',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'
]
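# Illustrative note (assumption, not part of this module): a setup.py would
# typically import these names and pass them to setup(), e.g.
#   setup(name=name, version=version, description=description,
#         packages=packages, package_dir=package_dir,
#         ext_modules=extensions, cmdclass=command_classes)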
| gpl-2.0 | -3,137,106,722,098,829,000 | 36.034483 | 78 | 0.677142 | false |
kustodian/ansible | lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py | 11 | 17073 | #!/usr/bin/python
# Copyright (c) 2017 Jon Meran <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_compute_environment
short_description: Manage AWS Batch Compute Environments
description:
- This module allows the management of AWS Batch Compute Environments.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
compute_environment_name:
description:
- The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores
are allowed.
required: true
type: str
type:
description:
- The type of the compute environment.
required: true
choices: ["MANAGED", "UNMANAGED"]
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
compute_environment_state:
description:
- The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs
from a queue and can scale out automatically based on queues.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
type: str
service_role:
description:
- The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
services on your behalf.
required: true
type: str
compute_resource_type:
description:
- The type of compute resource.
required: true
choices: ["EC2", "SPOT"]
type: str
minv_cpus:
description:
- The minimum number of EC2 vCPUs that an environment should maintain.
required: true
type: int
maxv_cpus:
description:
- The maximum number of EC2 vCPUs that an environment can reach.
required: true
type: int
desiredv_cpus:
description:
- The desired number of EC2 vCPUS in the compute environment.
type: int
instance_types:
description:
- The instance types that may be launched.
required: true
type: list
elements: str
image_id:
description:
- The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
type: str
subnets:
description:
- The VPC subnets into which the compute resources are launched.
required: true
type: list
elements: str
security_group_ids:
description:
- The EC2 security groups that are associated with instances launched in the compute environment.
required: true
type: list
elements: str
ec2_key_pair:
description:
- The EC2 key pair that is used for instances launched in the compute environment.
type: str
instance_role:
description:
- The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
required: true
type: str
tags:
description:
- Key-value pair tags to be applied to resources that are launched in the compute environment.
type: dict
bid_percentage:
description:
- The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price
must be below 20% of the current On-Demand price for that EC2 instance.
type: int
spot_iam_fleet_role:
description:
- The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
type: str
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Compute Environment
aws_batch_compute_environment:
compute_environment_name: computeEnvironmentName
state: present
region: us-east-1
compute_environment_state: ENABLED
type: MANAGED
compute_resource_type: EC2
minv_cpus: 0
maxv_cpus: 2
desiredv_cpus: 1
instance_types:
- optimal
subnets:
- my-subnet1
- my-subnet2
security_group_ids:
- my-sg1
- my-sg2
instance_role: arn:aws:iam::<account>:instance-profile/<role>
tags:
tag1: value1
tag2: value2
service_role: arn:aws:iam::<account>:role/service-role/<role>
register: aws_batch_compute_environment_action
- name: show results
debug:
var: aws_batch_compute_environment_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_compute_environment_action: none
changed: false
invocation:
module_args:
aws_access_key: ~
aws_secret_key: ~
bid_percentage: ~
compute_environment_name: <name>
compute_environment_state: ENABLED
compute_resource_type: EC2
desiredv_cpus: 0
ec2_key_pair: ~
ec2_url: ~
image_id: ~
instance_role: "arn:aws:iam::..."
instance_types:
- optimal
maxv_cpus: 8
minv_cpus: 0
profile: ~
region: us-east-1
security_group_ids:
- "*******"
security_token: ~
service_role: "arn:aws:iam::...."
spot_iam_fleet_role: ~
state: present
subnets:
- "******"
tags:
Environment: <name>
Name: <name>
type: MANAGED
validate_certs: true
response:
computeEnvironmentArn: "arn:aws:batch:...."
computeEnvironmentName: <name>
computeResources:
desiredvCpus: 0
instanceRole: "arn:aws:iam::..."
instanceTypes:
- optimal
maxvCpus: 8
minvCpus: 0
securityGroupIds:
- "******"
subnets:
- "*******"
tags:
Environment: <name>
Name: <name>
type: EC2
ecsClusterArn: "arn:aws:ecs:....."
serviceRole: "arn:aws:iam::..."
state: ENABLED
status: VALID
statusReason: "ComputeEnvironment Healthy"
type: MANAGED
type: dict
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.batch import AWSConnection
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, HAS_BOTO3
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
import re
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
except ImportError:
pass # Handled by HAS_BOTO3
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
return snake_dict_to_camel_dict(api_params)
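# Illustrative sketch (hypothetical values, not taken from the module): with
# module.params == {'compute_environment_name': 'my-env', 'service_role': None}
# and module_params == ('compute_environment_name', 'service_role'), the call
# drops the None entry and camel-cases the remaining key, returning
# {'computeEnvironmentName': 'my-env'} ready to pass to boto3.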
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
compute_environment_name = module.params['compute_environment_name']
# validate compute environment name
if not re.search(r'^[\w\_:]+$', compute_environment_name):
module.fail_json(
msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
"and underscores.".format(compute_environment_name)
)
if not compute_environment_name.startswith('arn:aws:batch:'):
if len(compute_environment_name) > 128:
module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
.format(compute_environment_name))
return
# ---------------------------------------------------------------------------------------------------
#
# Batch Compute Environment functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_compute_environment(module, connection):
try:
environments = connection.client().describe_compute_environments(
computeEnvironments=[module.params['compute_environment_name']]
)
if len(environments['computeEnvironments']) > 0:
return environments['computeEnvironments'][0]
else:
return None
except ClientError:
return None
def create_compute_environment(module, aws):
"""
Adds a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
params = (
'compute_environment_name', 'type', 'service_role')
api_params = set_api_params(module, params)
if module.params['compute_environment_state'] is not None:
api_params['state'] = module.params['compute_environment_state']
compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
'spot_iam_fleet_role')
compute_resources_params = set_api_params(module, compute_resources_param_list)
if module.params['compute_resource_type'] is not None:
compute_resources_params['type'] = module.params['compute_resource_type']
# if module.params['minv_cpus'] is not None:
# compute_resources_params['minvCpus'] = module.params['minv_cpus']
api_params['computeResources'] = compute_resources_params
try:
if not module.check_mode:
client.create_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def remove_compute_environment(module, aws):
"""
Remove a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
api_params = {'computeEnvironment': module.params['compute_environment_name']}
try:
if not module.check_mode:
client.delete_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def manage_state(module, aws):
changed = False
current_state = 'absent'
state = module.params['state']
compute_environment_state = module.params['compute_environment_state']
compute_environment_name = module.params['compute_environment_name']
service_role = module.params['service_role']
minv_cpus = module.params['minv_cpus']
maxv_cpus = module.params['maxv_cpus']
desiredv_cpus = module.params['desiredv_cpus']
action_taken = 'none'
update_env_response = ''
check_mode = module.check_mode
# check if the compute environment exists
current_compute_environment = get_current_compute_environment(module, aws)
response = current_compute_environment
if current_compute_environment:
current_state = 'present'
if state == 'present':
if current_state == 'present':
updates = False
# Update Batch Compute Environment configuration
compute_kwargs = {'computeEnvironment': compute_environment_name}
# Update configuration if needed
compute_resources = {}
if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
compute_kwargs.update({'state': compute_environment_state})
updates = True
if service_role and current_compute_environment['serviceRole'] != service_role:
compute_kwargs.update({'serviceRole': service_role})
updates = True
if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
compute_resources['minvCpus'] = minv_cpus
if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
compute_resources['maxvCpus'] = maxv_cpus
if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
compute_resources['desiredvCpus'] = desiredv_cpus
if len(compute_resources) > 0:
compute_kwargs['computeResources'] = compute_resources
updates = True
if updates:
try:
if not check_mode:
update_env_response = aws.client().update_compute_environment(**compute_kwargs)
if not update_env_response:
                            module.fail_json(msg='Unable to get compute environment information after updating')
changed = True
action_taken = "updated"
except (ParamValidationError, ClientError) as e:
module.fail_json(msg="Unable to update environment: {0}".format(to_native(e)),
exception=traceback.format_exc())
else:
# Create Batch Compute Environment
changed = create_compute_environment(module, aws)
# Describe compute environment
action_taken = 'added'
response = get_current_compute_environment(module, aws)
if not response:
module.fail_json(msg='Unable to get compute environment information after creating')
else:
if current_state == 'present':
# remove the compute environment
changed = remove_compute_environment(module, aws)
action_taken = 'deleted'
return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
"""
Main entry point.
:return dict: changed, batch_compute_environment_action, response
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
compute_environment_name=dict(required=True),
type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
service_role=dict(required=True),
compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
minv_cpus=dict(type='int', required=True),
maxv_cpus=dict(type='int', required=True),
desiredv_cpus=dict(type='int'),
instance_types=dict(type='list', required=True),
image_id=dict(),
subnets=dict(type='list', required=True),
security_group_ids=dict(type='list', required=True),
ec2_key_pair=dict(),
instance_role=dict(required=True),
tags=dict(type='dict'),
bid_percentage=dict(type='int'),
spot_iam_fleet_role=dict(),
region=dict(aliases=['aws_region', 'ec2_region'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['batch'])
validate_params(module, aws)
results = manage_state(module, aws)
module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
if __name__ == '__main__':
main()
| gpl-3.0 | -2,565,274,807,453,274,600 | 32.608268 | 126 | 0.608212 | false |
boundarydevices/android_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/java_heap_profiler.py | 8 | 3258 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import threading
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
try:
from pylib import constants # pylint: disable=F0401
except Exception:
constants = None
class JavaHeapProfiler(profiler.Profiler):
"""Android-specific, trigger and fetch java heap dumps."""
_DEFAULT_DEVICE_DIR = '/data/local/tmp/javaheap'
# TODO(bulach): expose this as a command line option somehow.
_DEFAULT_INTERVAL = 20
def __init__(self, browser_backend, platform_backend, output_path, state):
super(JavaHeapProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._run_count = 1
self._DumpJavaHeap(False)
self._timer = threading.Timer(self._DEFAULT_INTERVAL, self._OnTimer)
self._timer.start()
@classmethod
def name(cls):
return 'java-heap'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._timer.cancel()
self._DumpJavaHeap(True)
self._browser_backend.adb.device().old_interface.Adb().Pull(
self._DEFAULT_DEVICE_DIR, self._output_path)
self._browser_backend.adb.RunShellCommand(
'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
output_files = []
for f in os.listdir(self._output_path):
if os.path.splitext(f)[1] == '.aprof':
input_file = os.path.join(self._output_path, f)
output_file = input_file.replace('.aprof', '.hprof')
hprof_conv = os.path.join(constants.ANDROID_SDK_ROOT,
'tools', 'hprof-conv')
subprocess.call([hprof_conv, input_file, output_file])
output_files.append(output_file)
return output_files
def _OnTimer(self):
self._DumpJavaHeap(False)
def _DumpJavaHeap(self, wait_for_completion):
if not self._browser_backend.adb.device().old_interface.FileExistsOnDevice(
self._DEFAULT_DEVICE_DIR):
self._browser_backend.adb.RunShellCommand(
'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
self._browser_backend.adb.RunShellCommand(
'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
device_dump_file = None
for pid in self._GetProcessOutputFileMap().iterkeys():
device_dump_file = '%s/%s.%s.aprof' % (self._DEFAULT_DEVICE_DIR, pid,
self._run_count)
self._browser_backend.adb.RunShellCommand('am dumpheap %s %s' %
(pid, device_dump_file))
if device_dump_file and wait_for_completion:
util.WaitFor(lambda: self._FileSize(device_dump_file) > 0, timeout=2)
self._run_count += 1
def _FileSize(self, file_name):
f = self._browser_backend.adb.device().old_interface.ListPathContents(
file_name)
return f.get(os.path.basename(file_name), (0, ))[0]
| bsd-3-clause | -7,030,288,472,757,409,000 | 36.022727 | 79 | 0.66329 | false |
arantebillywilson/python-snippets | microblog/flask/lib/python3.5/site-packages/whoosh/util/__init__.py | 52 | 4424 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import random, sys, time
from array import array
from bisect import insort, bisect_left
from functools import wraps
from whoosh.compat import xrange
# These must be valid separate characters in CASE-INSENSITIVE filenames
IDCHARS = "0123456789abcdefghijklmnopqrstuvwxyz"
if hasattr(time, "perf_counter"):
now = time.perf_counter
elif sys.platform == 'win32':
now = time.clock
else:
now = time.time
def random_name(size=28):
return "".join(random.choice(IDCHARS) for _ in xrange(size))
def random_bytes(size=28):
gen = (random.randint(0, 255) for _ in xrange(size))
if sys.version_info[0] >= 3:
return bytes(gen)
else:
return array("B", gen).tostring()
def make_binary_tree(fn, args, **kwargs):
"""Takes a function/class that takes two positional arguments and a list of
arguments and returns a binary tree of results/instances.
>>> make_binary_tree(UnionMatcher, [matcher1, matcher2, matcher3])
UnionMatcher(matcher1, UnionMatcher(matcher2, matcher3))
Any keyword arguments given to this function are passed to the class
initializer.
"""
count = len(args)
if not count:
raise ValueError("Called make_binary_tree with empty list")
elif count == 1:
return args[0]
half = count // 2
return fn(make_binary_tree(fn, args[:half], **kwargs),
make_binary_tree(fn, args[half:], **kwargs), **kwargs)
def make_weighted_tree(fn, ls, **kwargs):
"""Takes a function/class that takes two positional arguments and a list of
    (weight, argument) tuples and returns a Huffman-like weighted tree of
results/instances.
"""
if not ls:
raise ValueError("Called make_weighted_tree with empty list")
ls.sort()
while len(ls) > 1:
a = ls.pop(0)
b = ls.pop(0)
insort(ls, (a[0] + b[0], fn(a[1], b[1])))
return ls[0][1]
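# Usage sketch (hypothetical combiner, for illustration only):
#
#   combine = lambda a, b: ("node", a, b)
#   tree = make_weighted_tree(combine, [(3, "c"), (1, "a"), (2, "b")])
#
# The two lightest entries ("a" and "b") are merged first, and the resulting
# subtree is then combined with "c", mirroring Huffman coding's bottom-up build.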
# Fibonacci function
_fib_cache = {}
def fib(n):
"""Returns the nth value in the Fibonacci sequence.
"""
if n <= 2:
return n
if n in _fib_cache:
return _fib_cache[n]
result = fib(n - 1) + fib(n - 2)
_fib_cache[n] = result
return result
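# Note on indexing (derived from the code above): fib(1) == 1, fib(2) == 2,
# fib(3) == 3, fib(4) == 5, fib(5) == 8, i.e. the sequence is shifted relative
# to the textbook variant that starts 1, 1, 2, 3, 5.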
# Decorators
def synchronized(func):
"""Decorator for storage-access methods, which synchronizes on a threading
lock. The parent object must have 'is_closed' and '_sync_lock' attributes.
"""
@wraps(func)
def synchronized_wrapper(self, *args, **kwargs):
with self._sync_lock:
return func(self, *args, **kwargs)
return synchronized_wrapper
def unclosed(method):
"""
Decorator to check if the object is closed.
"""
@wraps(method)
def unclosed_wrapper(self, *args, **kwargs):
if self.closed:
raise ValueError("Operation on a closed object")
return method(self, *args, **kwargs)
return unclosed_wrapper
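# Usage sketch (hypothetical class, illustration only; assumes the standard
# library's threading module for the lock):
#
#   class Table(object):
#       def __init__(self):
#           self.closed = False
#           self._sync_lock = threading.RLock()
#
#       @synchronized
#       @unclosed
#       def write(self, data):
#           ...
#
# write() then serializes concurrent callers on self._sync_lock and raises
# ValueError once self.closed is True.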
| mit | 7,785,097,664,265,965,000 | 30.15493 | 79 | 0.689195 | false |
ruzhytskyi/Koans | python3/koans/about_class_attributes.py | 97 | 4668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
class AboutClassAttributes(Koan):
class Dog:
pass
def test_objects_are_objects(self):
fido = self.Dog()
self.assertEqual(__, isinstance(fido, object))
def test_classes_are_types(self):
self.assertEqual(__, self.Dog.__class__ == type)
def test_classes_are_objects_too(self):
self.assertEqual(__, issubclass(self.Dog, object))
def test_objects_have_methods(self):
fido = self.Dog()
self.assertEqual(__, len(dir(fido)))
def test_classes_have_methods(self):
self.assertEqual(__, len(dir(self.Dog)))
def test_creating_objects_without_defining_a_class(self):
singularity = object()
self.assertEqual(__, len(dir(singularity)))
def test_defining_attributes_on_individual_objects(self):
fido = self.Dog()
fido.legs = 4
self.assertEqual(__, fido.legs)
def test_defining_functions_on_individual_objects(self):
fido = self.Dog()
fido.wag = lambda : 'fidos wag'
self.assertEqual(__, fido.wag())
def test_other_objects_are_not_affected_by_these_singleton_functions(self):
fido = self.Dog()
rover = self.Dog()
def wag():
return 'fidos wag'
fido.wag = wag
with self.assertRaises(___): rover.wag()
# ------------------------------------------------------------------
class Dog2:
def wag(self):
return 'instance wag'
def bark(self):
return "instance bark"
def growl(self):
return "instance growl"
@staticmethod
def bark():
return "staticmethod bark, arg: None"
@classmethod
def growl(cls):
return "classmethod growl, arg: cls=" + cls.__name__
def test_since_classes_are_objects_you_can_define_singleton_methods_on_them_too(self):
self.assertRegexpMatches(self.Dog2.growl(), __)
def test_classmethods_are_not_independent_of_instance_methods(self):
fido = self.Dog2()
self.assertRegexpMatches(fido.growl(), __)
self.assertRegexpMatches(self.Dog2.growl(), __)
def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
self.assertRegexpMatches(self.Dog2.bark(), __)
def test_staticmethods_also_overshadow_instance_methods(self):
fido = self.Dog2()
self.assertRegexpMatches(fido.bark(), __)
# ------------------------------------------------------------------
class Dog3:
def __init__(self):
self._name = None
def get_name_from_instance(self):
return self._name
def set_name_from_instance(self, name):
self._name = name
@classmethod
def get_name(cls):
return cls._name
@classmethod
def set_name(cls, name):
cls._name = name
name = property(get_name, set_name)
name_from_instance = property(get_name_from_instance, set_name_from_instance)
def test_classmethods_can_not_be_used_as_properties(self):
fido = self.Dog3()
with self.assertRaises(___): fido.name = "Fido"
def test_classes_and_instances_do_not_share_instance_attributes(self):
fido = self.Dog3()
fido.set_name_from_instance("Fido")
fido.set_name("Rover")
self.assertEqual(__, fido.get_name_from_instance())
self.assertEqual(__, self.Dog3.get_name())
def test_classes_and_instances_do_share_class_attributes(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual(__, fido.get_name())
self.assertEqual(__, self.Dog3.get_name())
# ------------------------------------------------------------------
class Dog4:
def a_class_method(cls):
return 'dogs class method'
def a_static_method():
return 'dogs static method'
a_class_method = classmethod(a_class_method)
a_static_method = staticmethod(a_static_method)
def test_you_can_define_class_methods_without_using_a_decorator(self):
self.assertEqual(__, self.Dog4.a_class_method())
def test_you_can_define_static_methods_without_using_a_decorator(self):
self.assertEqual(__, self.Dog4.a_static_method())
# ------------------------------------------------------------------
def test_heres_an_easy_way_to_explicitly_call_class_methods_from_instance_methods(self):
fido = self.Dog4()
self.assertEqual(__, fido.__class__.a_class_method())
| mit | -4,693,543,377,804,680,000 | 29.311688 | 92 | 0.569623 | false |
savoirfairelinux/django | tests/admin_inlines/admin.py | 17 | 5776 | from django import forms
from django.contrib import admin
from .models import (
Author, BinaryTree, CapoFamiglia, Chapter, ChildModel1, ChildModel2,
Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder,
Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked,
Inner4Tabular, NonAutoPKBook, NonAutoPKBookChild, Novel,
ParentModelWithCustomPk, Poll, Profile, ProfileCollection, Question,
ReadOnlyInline, ShoppingWeakness, Sighting, SomeChildModel,
SomeParentModel, SottoCapo, Title, TitleCollection,
)
site = admin.AdminSite(name="admin")
class BookInline(admin.TabularInline):
model = Author.books.through
class NonAutoPKBookTabularInline(admin.TabularInline):
model = NonAutoPKBook
classes = ('collapse',)
class NonAutoPKBookChildTabularInline(admin.TabularInline):
model = NonAutoPKBookChild
classes = ('collapse',)
class NonAutoPKBookStackedInline(admin.StackedInline):
model = NonAutoPKBook
classes = ('collapse',)
class EditablePKBookTabularInline(admin.TabularInline):
model = EditablePKBook
class EditablePKBookStackedInline(admin.StackedInline):
model = EditablePKBook
class AuthorAdmin(admin.ModelAdmin):
inlines = [
BookInline, NonAutoPKBookTabularInline, NonAutoPKBookStackedInline,
EditablePKBookTabularInline, EditablePKBookStackedInline,
NonAutoPKBookChildTabularInline,
]
class InnerInline(admin.StackedInline):
model = Inner
can_delete = False
readonly_fields = ('readonly',) # For bug #13174 tests.
class HolderAdmin(admin.ModelAdmin):
class Media:
js = ('my_awesome_admin_scripts.js',)
class ReadOnlyInlineInline(admin.TabularInline):
model = ReadOnlyInline
readonly_fields = ['name']
class InnerInline2(admin.StackedInline):
model = Inner2
class Media:
js = ('my_awesome_inline_scripts.js',)
class InnerInline3(admin.StackedInline):
model = Inner3
class Media:
js = ('my_awesome_inline_scripts.js',)
class TitleForm(forms.ModelForm):
title1 = forms.CharField(max_length=100)
def clean(self):
cleaned_data = self.cleaned_data
title1 = cleaned_data.get("title1")
title2 = cleaned_data.get("title2")
if title1 != title2:
raise forms.ValidationError("The two titles must be the same")
return cleaned_data
class TitleInline(admin.TabularInline):
model = Title
form = TitleForm
extra = 1
class Inner4StackedInline(admin.StackedInline):
model = Inner4Stacked
show_change_link = True
class Inner4TabularInline(admin.TabularInline):
model = Inner4Tabular
show_change_link = True
class Holder4Admin(admin.ModelAdmin):
inlines = [Inner4StackedInline, Inner4TabularInline]
class InlineWeakness(admin.TabularInline):
model = ShoppingWeakness
extra = 1
class QuestionInline(admin.TabularInline):
model = Question
readonly_fields = ['call_me']
def call_me(self, obj):
return 'Callable in QuestionInline'
class PollAdmin(admin.ModelAdmin):
inlines = [QuestionInline]
def call_me(self, obj):
return 'Callable in PollAdmin'
class ChapterInline(admin.TabularInline):
model = Chapter
readonly_fields = ['call_me']
def call_me(self, obj):
return 'Callable in ChapterInline'
class NovelAdmin(admin.ModelAdmin):
inlines = [ChapterInline]
class ConsigliereInline(admin.TabularInline):
model = Consigliere
class SottoCapoInline(admin.TabularInline):
model = SottoCapo
class ProfileInline(admin.TabularInline):
model = Profile
extra = 1
# admin for #18433
class ChildModel1Inline(admin.TabularInline):
model = ChildModel1
class ChildModel2Inline(admin.StackedInline):
model = ChildModel2
# admin for #19425 and #18388
class BinaryTreeAdmin(admin.TabularInline):
model = BinaryTree
def get_extra(self, request, obj=None, **kwargs):
extra = 2
if obj:
return extra - obj.binarytree_set.count()
return extra
def get_max_num(self, request, obj=None, **kwargs):
max_num = 3
if obj:
return max_num - obj.binarytree_set.count()
return max_num
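    # For instance (derived from the two methods above): editing a BinaryTree
    # node that already has two children yields extra == 0 and max_num == 1,
    # so at most one additional inline form is offered in the admin.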
# admin for #19524
class SightingInline(admin.TabularInline):
model = Sighting
# admin and form for #18263
class SomeChildModelForm(forms.ModelForm):
class Meta:
fields = '__all__'
model = SomeChildModel
widgets = {
'position': forms.HiddenInput,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name'].label = 'new label'
class SomeChildModelInline(admin.TabularInline):
model = SomeChildModel
form = SomeChildModelForm
site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline])
site.register(ProfileCollection, inlines=[ProfileInline])
site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline])
site.register(BinaryTree, inlines=[BinaryTreeAdmin])
site.register(ExtraTerrestrial, inlines=[SightingInline])
site.register(SomeParentModel, inlines=[SomeChildModelInline])
site.register([Question, Inner4Stacked, Inner4Tabular])
| bsd-3-clause | -2,074,902,786,168,036,400 | 24.004329 | 95 | 0.720222 | false |
golismero/golismero | golismero/common.py | 8 | 38619 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Common constants, classes and functions used across GoLismero.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = [
# Dynamically loaded modules, picks the fastest one available.
"pickle", "random", "json_encode", "json_decode",
# Helper functions.
"get_user_settings_folder", "get_default_config_file",
"get_default_user_config_file", "get_default_plugins_folder",
"get_data_folder", "get_wordlists_folder",
"get_install_folder", "get_tools_folder",
"get_profiles_folder", "get_profile", "get_available_profiles",
# Helper classes and decorators.
"Singleton", "decorator", "export_methods_as_functions",
"EmptyNewStyleClass",
# Configuration objects.
"OrchestratorConfig", "AuditConfig"
]
# Load the fast C version of pickle,
# if not available use the pure-Python version.
try:
import cPickle as pickle
except ImportError:
import pickle
# Import @decorator from the decorator module, if available.
# Otherwise define a simple but crude replacement.
try:
from decorator import decorator
except ImportError:
import functools
def decorator(w):
"""
The decorator module was not found. You can install it from:
http://pypi.python.org/pypi/decorator/
"""
def d(fn):
@functools.wraps(fn)
def x(*args, **kwargs):
return w(fn, *args, **kwargs)
return x
return d
try:
# The fastest JSON parser available for Python.
from cjson import decode as json_decode
from cjson import encode as json_encode
except ImportError:
try:
# Faster than the built-in module, usually found.
from simplejson import loads as json_decode
from simplejson import dumps as json_encode
except ImportError:
# Built-in module since Python 2.6, very very slow!
from json import loads as json_decode
from json import dumps as json_encode
# Other imports.
from netaddr import IPNetwork
from ConfigParser import RawConfigParser
from keyword import iskeyword
from os import path
import os
import random #noqa
import sys
# Remove the docstrings. This prevents errors when generating the API docs.
try:
json_encode.__doc__ = ""
except Exception:
_orig_json_encode = json_encode
def json_encode(*args, **kwargs):
return _orig_json_encode(*args, **kwargs)
try:
json_decode.__doc__ = ""
except Exception:
_orig_json_decode = json_decode
def json_decode(*args, **kwargs):
return _orig_json_decode(*args, **kwargs)
#------------------------------------------------------------------------------
# Helper class for instance creation without calling __init__().
class EmptyNewStyleClass (object):
pass
#------------------------------------------------------------------------------
_user_settings_folder = None
def get_user_settings_folder():
"""
Get the current user's GoLismero settings folder.
This folder will be used to store the various caches
and the user-defined plugins.
:returns: GoLismero settings folder.
:rtype: str
"""
# TODO: on Windows, use the roaming data folder instead.
# Return the cached value if available.
global _user_settings_folder
if _user_settings_folder:
return _user_settings_folder
# Get the user's home folder.
home = os.getenv("HOME") # Unix
if not home:
home = os.getenv("USERPROFILE") # Windows
# If all else fails, use the current directory.
if not home:
home = os.getcwd()
# Get the user settings folder.
folder = path.join(home, ".golismero")
# Make sure it ends with a slash.
if not folder.endswith(path.sep):
folder += path.sep
# Make sure it exists.
try:
os.makedirs(folder)
except Exception:
pass
# Cache the folder.
_user_settings_folder = folder
# Return the folder.
return folder
#------------------------------------------------------------------------------
def get_default_config_file():
"""
:returns:
Pathname of the default configuration file,
or None if it doesn't exist.
:rtype: str | None
"""
config_file = path.split(path.abspath(__file__))[0]
config_file = path.join(config_file, "..", "golismero.conf")
config_file = path.abspath(config_file)
if not path.isfile(config_file):
if path.sep == "/" and path.isfile("/etc/golismero.conf"):
config_file = "/etc/golismero.conf"
else:
config_file = None
return config_file
#------------------------------------------------------------------------------
def get_default_user_config_file():
"""
:returns:
Pathname of the default per-user configuration file,
or None if it doesn't exist.
:rtype: str | None
"""
config_file = path.join(get_user_settings_folder(), "user.conf")
if not path.isfile(config_file):
config_file = path.split(path.abspath(__file__))[0]
config_file = path.join(config_file, "..", "user.conf")
config_file = path.abspath(config_file)
if not path.isfile(config_file):
config_file = None
return config_file
#------------------------------------------------------------------------------
_install_folder = None
def get_install_folder():
"""
:returns: Pathname of the install folder.
:rtype: str
"""
global _install_folder
if not _install_folder:
pathname = path.split(path.abspath(__file__))[0]
pathname = path.join(pathname, "..")
pathname = path.abspath(pathname)
_install_folder = pathname
return _install_folder
#------------------------------------------------------------------------------
def get_tools_folder():
"""
:returns: Pathname of the bundled tools folder.
:rtype: str
"""
return path.join(get_install_folder(), "tools")
#------------------------------------------------------------------------------
def get_wordlists_folder():
"""
:returns: Pathname of the wordlists folder.
:rtype: str
"""
return path.join(get_install_folder(), "wordlist")
#------------------------------------------------------------------------------
def get_data_folder():
"""
:returns: Pathname of the data folder.
:rtype: str
"""
return path.join(get_install_folder(), "data")
#------------------------------------------------------------------------------
def get_default_plugins_folder():
"""
:returns: Default location for the plugins folder.
:rtype: str
"""
return path.join(get_install_folder(), "plugins")
#------------------------------------------------------------------------------
def get_profiles_folder():
"""
:returns: Pathname of the profiles folder.
:rtype: str
"""
return path.join(get_install_folder(), "profiles")
#------------------------------------------------------------------------------
def get_profile(name):
"""
Get the profile configuration file for the requested profile name.
:param name: Name of the profile.
:type name: str
:returns: Pathname of the profile configuration file.
:rtype: str
:raises ValueError: The name was invalid, or the profile was not found.
"""
# Trivial case.
if not name:
raise ValueError("No profile name given")
# Get the profiles folder.
profiles = get_profiles_folder()
# Get the filename for the requested profile.
filename = path.abspath(path.join(profiles, name + ".profile"))
# Check if it's outside the profiles folder or it doesn't exist.
if not profiles.endswith(path.sep):
profiles += path.sep
if not filename.startswith(profiles) or not path.isfile(filename):
raise ValueError("Profile not found: %r" % name)
# Return the filename.
return filename
#------------------------------------------------------------------------------
def get_available_profiles():
"""
:returns: Available profiles.
:rtype: set(str)
"""
profiles_folder = get_profiles_folder()
if not profiles_folder or not path.isdir(profiles_folder):
return set()
return {
path.splitext(name)[0]
for name in os.listdir(profiles_folder)
if name.endswith(".profile")
}
#------------------------------------------------------------------------------
class Singleton (object):
"""
Implementation of the Singleton pattern.
"""
# Variable where we keep the instance.
_instance = None
def __new__(cls):
# If the singleton has already been instanced, return it.
if cls._instance is not None:
return cls._instance
# Create the singleton's instance.
cls._instance = super(Singleton, cls).__new__(cls)
# Call the constructor.
cls.__init__(cls._instance)
# Delete the constructor so it won't be called again.
cls._instance.__init__ = object.__init__
cls.__init__ = object.__init__
# Return the instance.
return cls._instance
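# Usage sketch (illustration only): subclassing Singleton makes every
# instantiation return the same object, and __init__ runs only once.
#
#   class Cache(Singleton):
#       def __init__(self):
#           self.data = {}
#
#   assert Cache() is Cache()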
#------------------------------------------------------------------------------
def export_methods_as_functions(singleton, module):
"""
Export all methods from a Singleton instance as bare functions of a module.
:param singleton: Singleton instance to export.
:type singleton: Singleton
:param module: Target module name.
This would typically be \\_\\_name\\_\\_.
:type module: str
:raises KeyError: No module with that name is loaded.
"""
# TODO: maybe take the module name as input instead,
# and pull everything else from sys.modules.
clazz = singleton.__class__
module_obj = sys.modules[module]
try:
exports = module_obj.__all__
except AttributeError:
exports = module_obj.__all__ = []
for name in dir(clazz):
if name[0] != "_":
unbound = getattr(clazz, name)
if callable(unbound) and not isinstance(unbound, property):
bound = getattr(singleton, name)
setattr(module_obj, name, bound)
if name not in exports:
exports.append(name)
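# Usage sketch (hypothetical module layout, illustration only): a module that
# wants to expose a singleton's API as plain functions can end with
#
#   _config = MySettings()          # MySettings being a Singleton subclass
#   export_methods_as_functions(_config, __name__)
#
# after which callers can simply `from that_module import some_method`.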
#------------------------------------------------------------------------------
class Configuration (object):
"""
Generic configuration class.
"""
#--------------------------------------------------------------------------
# The logic in configuration classes is always:
# - Checking options without fixing them is done in check_params().
# - Sanitizing (fixing) options is done in parsers or in property setters.
# - For each source, there's a "from_*" method. They add to the
# current options rather than overwriting them completely.
# This allows options to be read from multiple sources.
#--------------------------------------------------------------------------
# Here's where subclasses define the options.
#
# It's a dictionary of tuples of the following format:
#
# name: ( parser, default )
#
# Where "name" is the option name, "parser" is an optional
# callback to parse the input values, and "default" is an
# optional default value.
#
# If no parser is given, the values are preserved when set.
#
# Example:
# class MySettings(Configuration):
# _settings_ = {
# "verbose": (int, 0), # A complete definition.
# "output_file": str, # Omitting the default value (None is used).
# "data": None, # Omitting the parser too.
# }
#
_settings_ = dict()
# This is a set of properties that may not be loaded from a config file.
# They will still be loaded from objects, dictionaries, JSON, etc.
_forbidden_ = set()
#--------------------------------------------------------------------------
# Some helper parsers.
@staticmethod
def string(x):
if x is None:
return None
if isinstance(x, unicode):
return x.encode("UTF-8")
return str(x)
@staticmethod
def integer(x):
if type(x) in (int, long):
return x
return int(x, 0) if x else 0
@staticmethod
def integer_or_none(x):
if x is None or (hasattr(x, "lower") and
x.lower() in ("", "none", "inf", "infinite")):
return None
return Configuration.integer(x)
@staticmethod
def float(x):
return float(x) if x else 0.0
@staticmethod
def comma_separated_list(x):
if not x:
return []
if isinstance(x, str):
return [t.strip() for t in x.split(",")]
if isinstance(x, unicode):
return [t.strip().encode("UTF-8") for t in x.split(u",")]
return list(x)
@staticmethod
def boolean(x):
if not x:
return False
if x is True:
return x
if hasattr(x, "lower"):
return {
"enabled": True, # True
"enable": True,
"true": True,
"yes": True,
"y": True,
"1": True,
"disabled": False, # False
"disable": False,
"false": False,
"no": False,
"f": False,
"0": False,
}.get(x.lower(), bool(x))
return bool(x)
@staticmethod
def trinary(x):
if x in (None, True, False):
return x
if not hasattr(x, "lower"):
raise ValueError(
"Trinary values only accept True, False and None")
try:
return {
"enabled": True, # True
"enable": True,
"true": True,
"yes": True,
"y": True,
"1": True,
"disabled": False, # False
"disable": False,
"false": False,
"no": False,
"f": False,
"0": False,
"default": None, # None
"def": None,
"none": None,
"maybe": None,
"?": None,
"-1": None,
}[x.lower()]
except KeyError:
raise ValueError("Unknown value: %r" % x)
#--------------------------------------------------------------------------
def __init__(self):
history = set()
for name, definition in self._settings_.iteritems():
if name in history:
raise SyntaxError("Duplicated option name: %r" % name)
history.add(name)
if type(definition) not in (tuple, list):
definition = (definition, None)
self.__init_option(name, *definition)
#--------------------------------------------------------------------------
def __init_option(self, name, parser = None, default = None):
if name.endswith("_") or not name.replace("_", "").isalnum():
msg = "Option name %r is not a valid Python identifier"
raise SyntaxError(msg % name)
if iskeyword(name):
msg = "Option name %r is a Python reserved keyword"
raise SyntaxError(msg % name)
if name.startswith("__"):
msg = "Option name %r is a private Python identifier"
raise SyntaxError(msg % name)
if name.startswith("_"):
msg = "Option name %r is a protected Python identifier"
raise SyntaxError(msg % name)
if parser is not None and not callable(parser):
msg = "Option parser cannot be of type %s"
raise SyntaxError(msg % type(parser))
setattr(self, name, default)
#--------------------------------------------------------------------------
def __setattr__(self, name, value):
if not name.startswith("_"):
definition = self._settings_.get(name, (None, None))
if type(definition) not in (tuple, list):
definition = (definition, None)
parser = definition[0]
if parser is not None:
value = parser(value)
object.__setattr__(self, name, value)
#--------------------------------------------------------------------------
def check_params(self):
"""
Check if parameters are valid. Raises an exception otherwise.
This method only checks the validity of the arguments,
it won't modify them.
:raises ValueError: The parameters are incorrect.
"""
return
#--------------------------------------------------------------------------
def from_dictionary(self, args):
"""
Get the settings from a Python dictionary.
:param args: Settings.
:type args: dict(str -> \\*)
"""
for name, value in args.iteritems():
if name in self._settings_:
setattr(self, name, value)
#--------------------------------------------------------------------------
def from_object(self, args):
"""
Get the settings from the attributes of a Python object.
:param args:
Python object,
for example the command line arguments parsed by argparse.
:type args: object
"""
# Builds a dictionary with the object's public attributes.
args = {
k : getattr(args, k)
for k in dir(args) if not k.startswith("_")
}
# Remove all attributes whose values are None.
args = { k:v for k,v in args.iteritems() if v is not None }
# Extract the settings from the dictionary.
if args:
self.from_dictionary(args)
#--------------------------------------------------------------------------
def from_json(self, json_raw_data):
"""
Get the settings from a JSON encoded dictionary.
:param json_raw_data: JSON raw data.
:type json_raw_data: str
"""
# Converts the JSON data into a dictionary.
args = json_decode(json_raw_data)
if not isinstance(args, dict):
raise TypeError("Invalid JSON data")
# Extract the settings from the dictionary.
if args:
self.from_dictionary(args)
#--------------------------------------------------------------------------
def from_config_file(self, config_file, allow_profile = False):
"""
Get the settings from a configuration file.
:param config_file: Configuration file.
:type config_file: str
:param allow_profile: True to allow reading the profile name
from the config file, False to forbid it. Global config
files should allow setting a default profile, but profile
config files should not, as it wouldn't make sense.
"""
parser = RawConfigParser()
parser.read(config_file)
if parser.has_section("golismero"):
options = { k:v for k,v in parser.items("golismero") if v }
if "profile" in options:
if allow_profile:
self.profile = options["profile"]
self.profile_file = get_profile(self.profile)
else:
del options["profile"]
for k in self._forbidden_:
if k in options:
del options[k]
if options:
self.from_dictionary(options)
#--------------------------------------------------------------------------
def to_dictionary(self):
"""
Copy the settings to a Python dictionary.
:returns: Dictionary that maps the setting names to their values.
:rtype: dict(str -> \\*)
"""
result = {}
for name, definition in self._settings_.iteritems():
default = None
if type(definition) in (tuple, list) and len(definition) > 1:
default = definition[1]
value = getattr(self, name, default)
result[name] = value
return result
#--------------------------------------------------------------------------
def to_json(self):
"""
Copy the settings to a JSON encoded dictionary.
:returns: Settings as a JSON encoded dictionary.
:rtype: str
"""
# Extract the settings to a dictionary and encode it with JSON.
return json_encode( self.to_dictionary() )
#------------------------------------------------------------------------------
class OrchestratorConfig (Configuration):
"""
Orchestrator configuration object.
"""
#--------------------------------------------------------------------------
# The options definitions, they will be read from the config file:
#
_forbidden_ = set(( # except for these:
"config_file", "user_config_file",
"profile_file", "plugin_args", "ui_mode",
))
_settings_ = {
#
# Main options.
#
# UI mode.
"ui_mode": (str, "console"),
# Verbosity level.
"verbose": (Configuration.integer, 2),
# Colorize console?
"color": (Configuration.boolean, False),
#
# Plugin options.
#
# Enabled plugins.
"enable_plugins": (Configuration.comma_separated_list, ["all"]),
# Disabled plugins.
"disable_plugins": (Configuration.comma_separated_list, []),
# Plugins folder.
"plugins_folder": Configuration.string,
# Maximum number plugins to execute concurrently.
"max_concurrent": (Configuration.integer, 4),
#
# Network options.
#
# Maximum number of connections per host.
"max_connections": (Configuration.integer, 20),
# Use persistent cache?
"use_cache_db": (Configuration.boolean, True),
# When run as a service.
"listen_address": Configuration.string,
"listen_port": Configuration.integer,
"server_push": Configuration.string,
}
#--------------------------------------------------------------------------
# Options that are only set in runtime, not loaded from the config file.
# Configuration files.
config_file = get_default_config_file()
user_config_file = get_default_user_config_file()
# Profile.
profile = None
profile_file = None
# Plugin arguments.
plugin_args = dict() # plugin_id -> key -> value
#--------------------------------------------------------------------------
@staticmethod
def _load_profile(self, args):
if "profile" in args:
self.profile = args["profile"]
if isinstance(self.profile, unicode):
self.profile = self.profile.encode("UTF-8")
self.profile_file = get_profile(self.profile)
@staticmethod
def _load_plugin_args(self, args):
if "plugin_args" in args:
plugin_args = {}
for (plugin_id, target_args) in args["plugin_args"].iteritems():
if isinstance(plugin_id, unicode):
plugin_id = plugin_id.encode("UTF-8")
if not plugin_id in plugin_args:
plugin_args[plugin_id] = {}
for (key, value) in target_args.iteritems():
if isinstance(key, unicode):
key = key.encode("UTF-8")
if isinstance(value, unicode):
value = value.encode("UTF-8")
plugin_args[plugin_id][key] = value
self.plugin_args = plugin_args
def from_dictionary(self, args):
# Security note: do not copy config filenames!
# See the _forbidden_ property.
super(OrchestratorConfig, self).from_dictionary(args)
self._load_profile(self, args) # "self" is twice on purpose!
self._load_plugin_args(self, args) # don't change it or it breaks
def to_dictionary(self):
result = super(OrchestratorConfig, self).to_dictionary()
result["config_file"] = self.config_file
result["user_config_file"] = self.user_config_file
result["profile"] = self.profile
result["profile_file"] = self.profile_file
result["plugin_args"] = self.plugin_args
return result
#--------------------------------------------------------------------------
def check_params(self):
# Validate the network connections limit.
if self.max_connections < 1:
raise ValueError(
"Number of connections must be greater than 0,"
" got %i." % self.max_connections)
# Validate the number of concurrent processes.
if self.max_concurrent < 0:
raise ValueError(
"Number of processes cannot be a negative number,"
" got %i." % self.max_concurrent)
# Validate the list of plugins.
if not self.enable_plugins:
raise ValueError("No plugins selected for execution.")
if set(self.enable_plugins).intersection(self.disable_plugins):
raise ValueError(
"Conflicting plugins selection, aborting execution.")
#------------------------------------------------------------------------------
class AuditConfig (Configuration):
"""
Audit configuration object.
"""
#--------------------------------------------------------------------------
# The options definitions, they will be read from the config file:
#
    _forbidden_ = set((  # except for these:
"config_file", "user_config_file", "profile_file", "plugin_args",
"plugin_load_overrides", "command",
))
_settings_ = {
#
# Main options
#
# Targets
"targets": (Configuration.comma_separated_list, []),
#
# Report options
#
# Output files
"reports": (Configuration.comma_separated_list, []),
# Only display vulnerabilities
"only_vulns": (Configuration.trinary, None),
#
# Audit options
#
# Audit name
"audit_name": Configuration.string,
# Audit database
"audit_db": (None, ":memory:"),
# Input files
"imports": (Configuration.comma_separated_list, []),
# Redo the audit?
"redo": (Configuration.boolean, False),
#
# Plugin options
#
# Enabled plugins
"enable_plugins": (Configuration.comma_separated_list, ["all"]),
# Disabled plugins
"disable_plugins": (Configuration.comma_separated_list, []),
# Plugin execution timeout
"plugin_timeout": (Configuration.float, 3600.0),
#
# Network options
#
# Include subdomains?
"include_subdomains": (Configuration.boolean, True),
# Include parent folders?
"allow_parent": (Configuration.boolean, True),
# Depth level for spider
"depth": (Configuration.integer_or_none, 1),
# Limits
"max_links" : (Configuration.integer, 0), # 0 -> infinite
# Follow redirects
"follow_redirects": (Configuration.boolean, True),
# Follow a redirection on the target URL itself,
# regardless of "follow_redirects"
"follow_first_redirect": (Configuration.boolean, True),
# Proxy options
"proxy_addr": Configuration.string,
"proxy_port": Configuration.integer,
"proxy_user": Configuration.string,
"proxy_pass": Configuration.string,
# Cookie
"cookie": Configuration.string,
# User Agent
"user_agent": Configuration.string,
}
#--------------------------------------------------------------------------
# Options that are only set in runtime, not loaded from the config file.
# Configuration files.
config_file = get_default_config_file()
user_config_file = get_default_user_config_file()
# Profiles.
profile = None
profile_file = None
# Plugin arguments.
plugin_args = None # list of (plugin_id, key, value)
# Plugin load overrides.
plugin_load_overrides = None
# Command to run.
command = "SCAN"
#--------------------------------------------------------------------------
def from_dictionary(self, args):
# Security note: do not copy config filenames!
# See the _forbidden_ property.
super(AuditConfig, self).from_dictionary(args)
OrchestratorConfig._load_profile(self, args) # not a filename
OrchestratorConfig._load_plugin_args(self, args)
# Load the "command" property.
if "command" in args:
self.command = args["command"]
if isinstance(self.command, unicode):
self.command = self.command.encode("UTF-8")
# Load the "plugin_load_overrides" property.
if "plugin_load_overrides" in args:
if not self.plugin_load_overrides:
self.plugin_load_overrides = []
for (val, plugin_id) in args["plugin_load_overrides"]:
self.plugin_load_overrides.append((bool(val), str(plugin_id)))
#--------------------------------------------------------------------------
def to_dictionary(self):
result = super(AuditConfig, self).to_dictionary()
result["config_file"] = self.config_file
result["user_config_file"] = self.user_config_file
result["profile"] = self.profile
result["profile_file"] = self.profile_file
result["plugin_args"] = self.plugin_args
result["command"] = self.command
result["plugin_load_overrides"] = self.plugin_load_overrides
return result
#--------------------------------------------------------------------------
@property
def targets(self):
return self._targets
@targets.setter
def targets(self, targets):
# Always append, never overwrite.
# Fix target URLs if the scheme part is missing.
# Make sure self._targets contains a list.
self._targets = getattr(self, "_targets", [])
# Ignore the trivial case.
if not targets:
return
# Strip whitespace.
targets = [
x.strip()
for x in targets
if x not in self._targets
]
# Remove duplicates.
targets = [
x
for x in set(targets)
if x not in self._targets
]
# Encode all Unicode strings as UTF-8.
targets = [
x.encode("UTF-8") if isinstance(x, unicode) else str(x)
for x in targets
if x not in self._targets
]
# Detect network ranges, like 30.30.30.0/24, and get all IPs on it.
parsed_targets = []
for host in targets:
# Try to parse the address as a network range.
try:
tmp_target = IPNetwork(host)
except:
parsed_targets.append(host)
continue
# If it's a range, iterate it and get all IP addresses.
# If it's a single IP address, just add it.
if tmp_target.size != 1:
parsed_targets.extend(
str(x) for x in tmp_target.iter_hosts()
)
else:
parsed_targets.append( str(tmp_target.ip) )
# Add the new targets.
self._targets.extend(parsed_targets)
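    # Illustration (hypothetical input): assigning ["10.0.0.0/30", "www.example.com"]
    # stores the usable hosts 10.0.0.1 and 10.0.0.2 (netaddr's iter_hosts() skips
    # the network and broadcast addresses) plus "www.example.com", with duplicates
    # and surrounding whitespace stripped out first.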
@targets.deleter
def targets(self):
self._targets = []
#--------------------------------------------------------------------------
@property
def imports(self):
return self._imports
@imports.setter
def imports(self, imports):
# Always append, never overwrite.
self._imports = getattr(self, "_imports", [])
if imports:
self._imports.extend( (str(x) if x else None) for x in imports )
#--------------------------------------------------------------------------
@property
def reports(self):
return self._reports
@reports.setter
def reports(self, reports):
# Always append, never overwrite.
self._reports = getattr(self, "_reports", [])
if reports:
self._reports.extend( (str(x) if x else None) for x in reports )
#--------------------------------------------------------------------------
@property
def audit_db(self):
return self._audit_db
@audit_db.setter
def audit_db(self, audit_db):
if (
not audit_db or not audit_db.strip() or
audit_db.strip().lower() == ":auto:"
):
audit_db = ":auto:"
elif audit_db.strip().lower() == ":memory:":
audit_db = ":memory:"
self._audit_db = audit_db
#--------------------------------------------------------------------------
@property
def user_agent(self):
return self._user_agent
@user_agent.setter
def user_agent(self, user_agent):
if user_agent:
if isinstance(user_agent, unicode):
user_agent = user_agent.encode("UTF-8")
self._user_agent = user_agent
else:
self._user_agent = None
#--------------------------------------------------------------------------
@property
def cookie(self):
return self._cookie
@cookie.setter
def cookie(self, cookie):
if cookie:
# Parse the cookies argument.
try:
if isinstance(cookie, unicode):
cookie = cookie.encode("UTF-8")
# Prepare cookie.
cookie = cookie.replace(" ", "").replace("=", ":")
# Remove 'Cookie:' start, if exits.
if cookie.startswith("Cookie:"):
cookie = cookie[len("Cookie:"):]
# Split.
cookie = cookie.split(";")
# Parse.
cookie = { c.split(":")[0]:c.split(":")[1] for c in cookie}
except ValueError:
raise ValueError(
"Invalid cookie format specified."
" Use this format: 'Key=value; key=value'.")
else:
cookie = None
self._cookie = cookie
#--------------------------------------------------------------------------
@property
def proxy_addr(self):
return self._proxy_addr
@proxy_addr.setter
def proxy_addr(self, proxy_addr):
if proxy_addr:
proxy_addr = proxy_addr.strip()
if isinstance(proxy_addr, unicode):
proxy_addr = proxy_addr.encode("UTF-8")
if ":" in proxy_addr:
proxy_addr, proxy_port = proxy_addr.split(":", 1)
proxy_addr = proxy_addr.strip()
proxy_port = proxy_port.strip()
self.proxy_port = proxy_port
self._proxy_addr = proxy_addr
else:
self._proxy_addr = None
#--------------------------------------------------------------------------
@property
def proxy_port(self):
return self._proxy_port
@proxy_port.setter
def proxy_port(self, proxy_port):
if proxy_port:
self._proxy_port = int(proxy_port)
if self._proxy_port < 1 or self._proxy_port > 65534:
raise ValueError(
"Invalid proxy port number: %d" % self._proxy_port)
else:
self._proxy_port = None
#--------------------------------------------------------------------------
def check_params(self):
# Validate the list of plugins.
if not self.enable_plugins:
raise ValueError(
"No plugins selected for execution.")
if set(self.enable_plugins).intersection(self.disable_plugins):
raise ValueError(
"Conflicting plugins selection, aborting execution.")
# Validate the recursion depth.
if self.depth is not None and self.depth < 0:
raise ValueError(
"Spidering depth can't be negative: %r" % self.depth)
if self.depth is not None and self.depth == 0:
raise ValueError(
"Spidering depth can't be zero (nothing would be done!)")
#--------------------------------------------------------------------------
def is_new_audit(self):
"""
Determine if this is a brand new audit.
:returns: True if this is a new audit, False if it's an old audit.
:rtype: bool
"""
# Memory databases are always new audits.
if (
not self.audit_db or not self.audit_db.strip() or
self.audit_db.strip().lower() == ":memory:"
):
self.audit_db = ":memory:"
return True
# SQLite databases are new audits if the file doesn't exist.
# If we have no filename, use the audit name.
# If we don't have that either it's a new audit.
filename = self.audit_db
if not filename:
filename = self.audit_name + ".db"
if not filename:
return True
return not path.exists(filename)
| gpl-2.0 | -6,349,195,210,402,020,000 | 30.321168 | 79 | 0.512649 | false |
dednal/chromium.src | build/android/pylib/remote/device/remote_device_test_run.py | 9 | 9695 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run specific test on specific environment."""
import logging
import os
import sys
import tempfile
import time
import zipfile
from pylib import constants
from pylib.base import test_run
from pylib.remote.device import appurify_sanitized
from pylib.remote.device import remote_device_helper
from pylib.utils import zip_utils
class RemoteDeviceTestRun(test_run.TestRun):
"""Run gtests and uirobot tests on a remote device."""
WAIT_TIME = 5
COMPLETE = 'complete'
HEARTBEAT_INTERVAL = 300
def __init__(self, env, test_instance):
"""Constructor.
Args:
env: Environment the tests will run in.
test_instance: The test that will be run.
"""
super(RemoteDeviceTestRun, self).__init__(env, test_instance)
self._env = env
self._test_instance = test_instance
self._app_id = ''
self._test_id = ''
self._results = ''
self._test_run_id = ''
#override
def RunTests(self):
"""Run the test."""
if self._env.trigger:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_start_res = appurify_sanitized.api.tests_run(
self._env.token, self._env.device, self._app_id, self._test_id)
remote_device_helper.TestHttpResponse(
test_start_res, 'Unable to run test.')
self._test_run_id = test_start_res.json()['response']['test_run_id']
logging.info('Test run id: %s' % self._test_run_id)
if not self._env.collect:
assert isinstance(self._env.trigger, basestring), (
'File for storing test_run_id must be a string.')
with open(self._env.trigger, 'w') as test_run_id_file:
test_run_id_file.write(self._test_run_id)
if self._env.collect:
if not self._env.trigger:
        assert isinstance(self._env.collect, basestring), (
'File for storing test_run_id must be a string.')
with open(self._env.collect, 'r') as test_run_id_file:
self._test_run_id = test_run_id_file.read().strip()
current_status = ''
timeout_counter = 0
heartbeat_counter = 0
while self._GetTestStatus(self._test_run_id) != self.COMPLETE:
if self._results['detailed_status'] != current_status:
logging.info('Test status: %s', self._results['detailed_status'])
current_status = self._results['detailed_status']
timeout_counter = 0
heartbeat_counter = 0
if heartbeat_counter > self.HEARTBEAT_INTERVAL:
logging.info('Test status: %s', self._results['detailed_status'])
heartbeat_counter = 0
timeout = self._env.timeouts.get(
current_status, self._env.timeouts['unknown'])
if timeout_counter > timeout:
raise remote_device_helper.RemoteDeviceError(
'Timeout while in %s state for %s seconds'
% (current_status, timeout))
time.sleep(self.WAIT_TIME)
timeout_counter += self.WAIT_TIME
heartbeat_counter += self.WAIT_TIME
self._DownloadTestResults(self._env.results_path)
return self._ParseTestResults()
#override
def TearDown(self):
"""Tear down the test run."""
if (self._env.collect
and self._GetTestStatus(self._test_run_id) != self.COMPLETE):
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_abort_res = appurify_sanitized.api.tests_abort(
self._env.token, self._test_run_id, reason='Test runner exiting.')
remote_device_helper.TestHttpResponse(test_abort_res,
'Unable to abort test.')
def __enter__(self):
"""Set up the test run when used as a context manager."""
self.SetUp()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Tear down the test run when used as a context manager."""
self.TearDown()
#override
def SetUp(self):
"""Set up a test run."""
if self._env.trigger:
self._TriggerSetUp()
def _TriggerSetUp(self):
"""Set up the triggering of a test run."""
raise NotImplementedError
def _ParseTestResults(self):
raise NotImplementedError
def _GetTestByName(self, test_name):
"""Gets test_id for specific test.
Args:
test_name: Test to find the ID of.
"""
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_list_res = appurify_sanitized.api.tests_list(self._env.token)
remote_device_helper.TestHttpResponse(test_list_res,
'Unable to get tests list.')
for test in test_list_res.json()['response']:
if test['test_type'] == test_name:
return test['test_id']
raise remote_device_helper.RemoteDeviceError(
'No test found with name %s' % (test_name))
def _DownloadTestResults(self, results_path):
"""Download the test results from remote device service.
Args:
results_path: path to download results to.
"""
if results_path:
logging.info('Downloading results to %s.' % results_path)
      if not os.path.exists(os.path.dirname(results_path)):
        os.makedirs(os.path.dirname(results_path))
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
appurify_sanitized.utils.wget(self._results['results']['url'],
results_path)
def _GetTestStatus(self, test_run_id):
"""Checks the state of the test, and sets self._results
Args:
test_run_id: Id of test on on remote service.
"""
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_check_res = appurify_sanitized.api.tests_check_result(
self._env.token, test_run_id)
remote_device_helper.TestHttpResponse(test_check_res,
'Unable to get test status.')
self._results = test_check_res.json()['response']
return self._results['status']
def _AmInstrumentTestSetup(self, app_path, test_path, runner_package):
config = {'runner': runner_package}
self._app_id = self._UploadAppToDevice(app_path)
data_deps = self._test_instance.GetDataDependencies()
if data_deps:
with tempfile.NamedTemporaryFile(suffix='.zip') as test_with_deps:
sdcard_files = []
host_test = os.path.basename(test_path)
with zipfile.ZipFile(test_with_deps.name, 'w') as zip_file:
zip_file.write(test_path, host_test, zipfile.ZIP_DEFLATED)
for h, _ in data_deps:
zip_utils.WriteToZipFile(zip_file, h, '.')
if os.path.isdir(h):
sdcard_files.extend(os.listdir(h))
else:
              sdcard_files.append(h)
config['sdcard_files'] = ','.join(sdcard_files)
config['host_test'] = host_test
self._test_id = self._UploadTestToDevice(
'robotium', test_with_deps.name)
else:
self._test_id = self._UploadTestToDevice('robotium', test_path)
logging.info('Setting config: %s' % config)
self._SetTestConfig('robotium', config)
def _UploadAppToDevice(self, app_path):
"""Upload app to device."""
logging.info('Uploading %s to remote service.', app_path)
with open(app_path, 'rb') as apk_src:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
upload_results = appurify_sanitized.api.apps_upload(
self._env.token, apk_src, 'raw', name=self._test_instance.suite)
remote_device_helper.TestHttpResponse(
upload_results, 'Unable to upload %s.' % app_path)
return upload_results.json()['response']['app_id']
def _UploadTestToDevice(self, test_type, test_path):
"""Upload test to device
Args:
      test_type: Type of test that is being uploaded. Ex. uirobot, gtest..
      test_path: Path to the test file that will be uploaded.
"""
logging.info('Uploading %s to remote service.' % test_path)
with open(test_path, 'rb') as test_src:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
upload_results = appurify_sanitized.api.tests_upload(
self._env.token, test_src, 'raw', test_type)
remote_device_helper.TestHttpResponse(upload_results,
'Unable to upload %s.' % test_path)
return upload_results.json()['response']['test_id']
def _SetTestConfig(self, runner_type, body):
"""Generates and uploads config file for test.
Args:
      runner_type: Name of the config section to generate (e.g. 'robotium').
      body: Dict of key/value pairs to write into that section.
"""
logging.info('Generating config file for test.')
with tempfile.TemporaryFile() as config:
config_data = ['[appurify]', '[%s]' % runner_type]
config_data.extend('%s=%s' % (k, v) for k, v in body.iteritems())
config.write(''.join('%s\n' % l for l in config_data))
config.flush()
config.seek(0)
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
config_response = appurify_sanitized.api.config_upload(
self._env.token, config, self._test_id)
remote_device_helper.TestHttpResponse(
config_response, 'Unable to upload test config.')
| bsd-3-clause | 7,865,468,636,554,381,000 | 38.571429 | 78 | 0.613925 | false |
Tomsod/gemrb | gemrb/GUIScripts/bg2/GUICG9.py | 7 | 2316 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, proficiencies (GUICG9)
import GemRB
from GUIDefines import *
from ie_stats import *
import LUProfsSelection
SkillWindow = 0
DoneButton = 0
MyChar = 0
def RedrawSkills():
ProfsPointsLeft = GemRB.GetVar ("ProfsPointsLeft")
if not ProfsPointsLeft:
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
else:
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
return
def OnLoad():
global SkillWindow, DoneButton, MyChar
GemRB.LoadWindowPack("GUICG", 640, 480)
SkillWindow = GemRB.LoadWindow(9)
MyChar = GemRB.GetVar ("Slot")
Levels = [GemRB.GetPlayerStat (MyChar, IE_LEVEL), GemRB.GetPlayerStat (MyChar, IE_LEVEL2), \
GemRB.GetPlayerStat (MyChar, IE_LEVEL3)]
LUProfsSelection.SetupProfsWindow (MyChar, \
LUProfsSelection.LUPROFS_TYPE_CHARGEN, SkillWindow, RedrawSkills, [0,0,0], Levels)
BackButton = SkillWindow.GetControl(77)
BackButton.SetText(15416)
BackButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
DoneButton = SkillWindow.GetControl(0)
DoneButton.SetText(11973)
DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
RedrawSkills()
SkillWindow.SetVisible(WINDOW_VISIBLE)
return
def BackPress():
if SkillWindow:
SkillWindow.Unload()
GemRB.SetNextScript("CharGen6")
#scrap skills
return
def NextPress():
if SkillWindow:
SkillWindow.Unload()
LUProfsSelection.ProfsSave (MyChar, LUProfsSelection.LUPROFS_TYPE_CHARGEN)
GemRB.SetNextScript("CharGen7") #appearance
return
| gpl-2.0 | 8,402,356,270,781,358,000 | 29.473684 | 93 | 0.770725 | false |
wanghuan1115/sdkbox-vungle-sample | cpp/cocos2d/tools/gen-prebuilt/gen_prebuilt_libs.py | 80 | 13293 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# generate the prebuilt libs of engine
#
# Copyright 2014 (C) zhangbin
#
# License: MIT
# ----------------------------------------------------------------------------
'''
Generate the prebuilt libs of engine
'''
import os
import subprocess
import shutil
import sys
import excopy
import json
from argparse import ArgumentParser
if sys.platform == 'win32':
import _winreg
TESTS_PROJ_PATH = "tests/lua-tests"
ANDROID_SO_PATH = "project/proj.android/libs"
ANDROID_A_PATH = "project/proj.android/obj/local"
MK_PATH = "project/proj.android/jni/Application.mk"
CONSOLE_PATH = "tools/cocos2d-console/bin"
def os_is_win32():
return sys.platform == 'win32'
def os_is_mac():
return sys.platform == 'darwin'
def run_shell(cmd, cwd=None):
p = subprocess.Popen(cmd, shell=True, cwd=cwd)
p.wait()
if p.returncode:
raise subprocess.CalledProcessError(returncode=p.returncode, cmd=cmd)
return p.returncode
class Generator(object):
XCODE_CMD_FMT = "xcodebuild -project \"%s\" -configuration Release -target \"%s\" %s CONFIGURATION_BUILD_DIR=%s"
CONFIG_FILE = "build_config.json"
KEY_XCODE_PROJ_INFO = "xcode_proj_info"
KEY_WIN32_PROJ_INFO = "win32_proj_info"
KEY_OUTPUT_DIR = "outputdir"
KEY_TARGETS = "targets"
def __init__(self, args):
self.need_clean = args.need_clean
self.disable_strip = args.disable_strip
self.use_incredibuild = args.use_incredibuild
self.tool_dir = os.path.realpath(os.path.dirname(__file__))
self.no_android = args.no_android
self.engine_dir = os.path.join(self.tool_dir, os.path.pardir, os.path.pardir)
self.load_config()
def load_config(self):
cfg_json = os.path.join(self.tool_dir, Generator.CONFIG_FILE)
f = open(cfg_json)
cfg_info = json.load(f)
f.close()
self.xcode_proj_info = cfg_info[Generator.KEY_XCODE_PROJ_INFO]
self.win32_proj_info = cfg_info[Generator.KEY_WIN32_PROJ_INFO]
def modify_mk(self, mk_file):
if os.path.isfile(mk_file):
file_obj = open(mk_file, "a")
file_obj.write("\nAPP_ABI :=armeabi armeabi-v7a\n")
file_obj.close()
def build_android(self):
# build .a for android
console_dir = os.path.join(self.engine_dir, CONSOLE_PATH)
cmd_path = os.path.join(console_dir, "cocos")
proj_path = os.path.join(self.engine_dir, TESTS_PROJ_PATH)
# Add multi ABI in Application.mk
mk_file = os.path.join(proj_path, MK_PATH)
f = open(mk_file)
file_content = f.read()
f.close()
self.modify_mk(mk_file)
# build it
build_cmd = "%s compile -s %s -p android --ndk-mode release -j 4" % (cmd_path, proj_path)
run_shell(build_cmd)
f = open(mk_file, "w")
f.write(file_content)
f.close()
# copy .a to prebuilt dir
obj_dir = os.path.join(proj_path, ANDROID_A_PATH)
prebuilt_dir = os.path.join(self.tool_dir, "prebuilt", "android")
copy_cfg = {
"from": obj_dir,
"to": prebuilt_dir,
"include": [
"*.a$"
]
}
excopy.copy_files_with_config(copy_cfg, obj_dir, prebuilt_dir)
if not self.disable_strip:
# strip the android libs
ndk_root = os.environ["NDK_ROOT"]
if os_is_win32():
if self.is_32bit_windows():
bit_str = ""
else:
bit_str = "-x86_64"
sys_folder_name = "windows%s" % bit_str
elif os_is_mac():
sys_folder_name = "darwin-x86_64"
strip_cmd_path = os.path.join(ndk_root, "toolchains/arm-linux-androideabi-4.8/prebuilt/%s/arm-linux-androideabi/bin/strip" % sys_folder_name)
if os.path.exists(strip_cmd_path):
strip_cmd = "%s -S %s/armeabi*/*.a" % (strip_cmd_path, prebuilt_dir)
run_shell(strip_cmd)
def get_required_vs_version(self, proj_file):
# get the VS version required by the project
import re
file_obj = open(proj_file)
pattern = re.compile(r"^# Visual Studio.+(\d{4})")
num = None
for line in file_obj:
match = pattern.match(line)
if match is not None:
num = match.group(1)
break
if num is not None:
if num == "2012":
ret = "11.0"
elif num == "2013":
ret = "12.0"
else:
ret = None
else:
ret = None
return ret
def get_vs_cmd_path(self, require_version):
        # Find the VS install path in the Windows registry; on 64-bit systems,
        # check both the 32-bit and 64-bit registry views.
if self.is_32bit_windows():
reg_flag_list = [ _winreg.KEY_WOW64_32KEY ]
else:
reg_flag_list = [ _winreg.KEY_WOW64_64KEY, _winreg.KEY_WOW64_32KEY ]
needUpgrade = False
vsPath = None
try:
for reg_flag in reg_flag_list:
print("find vs in reg : %s" % ("32bit" if reg_flag == _winreg.KEY_WOW64_32KEY else "64bit"))
vs = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\Microsoft\VisualStudio",
0,
_winreg.KEY_READ | reg_flag
)
try:
i = 0
while True:
try:
# enum the keys in vs reg
version = _winreg.EnumKey(vs, i)
find_ver = float(version)
# find the vs which version >= required version
if find_ver >= float(require_version):
key = _winreg.OpenKey(vs, r"SxS\VS7")
vsPath, type = _winreg.QueryValueEx(key, version)
if os.path.exists(vsPath):
if float(version) > float(require_version):
needUpgrade = True
break
else:
vsPath = None
except:
continue
finally:
i += 1
except:
pass
# if find one right vs, break
if vsPath is not None:
break
except WindowsError as e:
message = "Visual Studio wasn't installed"
print(e)
raise Exception(message)
commandPath = os.path.join(vsPath, "Common7", "IDE", "devenv")
return (needUpgrade, commandPath)
def is_32bit_windows(self):
arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
archw = os.environ.has_key("PROCESSOR_ARCHITEW6432")
return (arch == "x86" and not archw)
def build_win32_proj(self, cmd_path, sln_path, proj_name, mode):
build_cmd = " ".join([
"\"%s\"" % cmd_path,
"\"%s\"" % sln_path,
"/%s \"Release|Win32\"" % mode,
"/Project \"%s\"" % proj_name
])
run_shell(build_cmd)
def build_win32(self):
print("Building Win32")
for key in self.win32_proj_info.keys():
output_dir = self.win32_proj_info[key][Generator.KEY_OUTPUT_DIR]
proj_path = os.path.join(self.engine_dir, key)
require_vs_version = self.get_required_vs_version(proj_path)
needUpgrade, vs_command = self.get_vs_cmd_path(require_vs_version)
# get the build folder & win32 output folder
build_folder_path = os.path.join(os.path.dirname(proj_path), "Release.win32")
if os.path.exists(build_folder_path):
shutil.rmtree(build_folder_path)
os.makedirs(build_folder_path)
win32_output_dir = os.path.join(self.tool_dir, output_dir)
if os.path.exists(win32_output_dir):
shutil.rmtree(win32_output_dir)
os.makedirs(win32_output_dir)
# upgrade projects
if needUpgrade:
commandUpgrade = ' '.join([
"\"%s\"" % vs_command,
"\"%s\"" % proj_path,
"/Upgrade"
])
run_shell(commandUpgrade)
if self.use_incredibuild:
# use incredibuild, build whole sln
build_cmd = " ".join([
"BuildConsole",
"%s" % proj_path,
"/build",
"/cfg=\"Release|Win32\""
])
run_shell(build_cmd)
if not self.use_incredibuild:
# build the projects
for proj_name in self.win32_proj_info[key][Generator.KEY_TARGETS]:
self.build_win32_proj(vs_command, proj_path, proj_name, "build")
lib_file_path = os.path.join(build_folder_path, "%s.lib" % proj_name)
if not os.path.exists(lib_file_path):
# if the lib is not generated, rebuild the project
self.build_win32_proj(vs_command, proj_path, proj_name, "rebuild")
if not os.path.exists(lib_file_path):
raise Exception("Library %s not generated as expected!" % lib_file_path)
# copy the libs into prebuilt dir
for file_name in os.listdir(build_folder_path):
file_path = os.path.join(build_folder_path, file_name)
shutil.copy(file_path, win32_output_dir)
print("Win32 build succeeded.")
def build_ios_mac(self):
for key in self.xcode_proj_info.keys():
output_dir = self.xcode_proj_info[key][Generator.KEY_OUTPUT_DIR]
proj_path = os.path.join(self.engine_dir, key)
ios_out_dir = os.path.join(self.tool_dir, output_dir, "ios")
mac_out_dir = os.path.join(self.tool_dir, output_dir, "mac")
ios_sim_libs_dir = os.path.join(ios_out_dir, "simulator")
ios_dev_libs_dir = os.path.join(ios_out_dir, "device")
for target in self.xcode_proj_info[key][Generator.KEY_TARGETS]:
build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s iOS" % target, "-sdk iphonesimulator", ios_sim_libs_dir)
run_shell(build_cmd, self.tool_dir)
build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s iOS" % target, "-sdk iphoneos", ios_dev_libs_dir)
run_shell(build_cmd, self.tool_dir)
build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s Mac" % target, "", mac_out_dir)
run_shell(build_cmd, self.tool_dir)
# generate fat libs for iOS
for lib in os.listdir(ios_sim_libs_dir):
sim_lib = os.path.join(ios_sim_libs_dir, lib)
dev_lib = os.path.join(ios_dev_libs_dir, lib)
output_lib = os.path.join(ios_out_dir, lib)
lipo_cmd = "lipo -create -output \"%s\" \"%s\" \"%s\"" % (output_lib, sim_lib, dev_lib)
run_shell(lipo_cmd)
# remove the simulator & device libs in iOS
shutil.rmtree(ios_sim_libs_dir)
shutil.rmtree(ios_dev_libs_dir)
if not self.disable_strip:
# strip the libs
ios_strip_cmd = "xcrun -sdk iphoneos strip -S %s/*.a" % ios_out_dir
run_shell(ios_strip_cmd)
mac_strip_cmd = "xcrun strip -S %s/*.a" % mac_out_dir
run_shell(mac_strip_cmd)
def build_all_libs(self):
if os_is_mac():
# build for iOS & Mac
self.build_ios_mac()
if os_is_win32():
# build for win32
self.build_win32()
if not self.no_android:
self.build_android()
def do_generate(self):
output_dir = os.path.join(self.tool_dir, "prebuilt")
if self.need_clean and os.path.exists(output_dir):
shutil.rmtree(output_dir)
self.build_all_libs()
if __name__ == "__main__":
parser = ArgumentParser(description="Generate prebuilt engine for Cocos Engine.")
parser.add_argument('-c', dest='need_clean', action="store_true", help='Remove the \"prebuilt\" directory first.')
parser.add_argument('-n', "--no-android", dest='no_android', action="store_true", help='Not build android libs.')
parser.add_argument('-d', "--disable-strip", dest='disable_strip', action="store_true", help='Disable the strip of the generated libs.')
parser.add_argument('-i', "--incredibuild", dest='use_incredibuild', action="store_true", help='Use incredibuild to build win32 projects. Only available on windows.')
(args, unknown) = parser.parse_known_args()
if len(unknown) > 0:
print("unknown arguments: %s" % unknown)
gen_obj = Generator(args)
gen_obj.do_generate()
| mit | -5,762,952,687,203,060,000 | 36.131285 | 170 | 0.525615 | false |
wangjun/odoo | addons/base_report_designer/plugin/openerp_report_designer/test/test_fields.py | 391 | 1308 | #
# Use this module to retrieve the fields you need according to the type
# of the OpenOffice operation:
# * Insert a Field
# * Insert a RepeatIn
#
import xmlrpclib
import time
sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')
def get(object, level=3, ending=None, ending_excl=None, recur=None, root=''):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
    res = sock.execute('terp', 3, 'admin', object, 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
print root+'/'+k
if res[k]['type'] in recur:
print root+'/'+k
if (res[k]['type'] in recur) and (level>0):
get(res[k]['relation'], level-1, ending, ending_excl, recur, root+'/'+k)
print 'Field selection for a field', '='*40
get('account.invoice', level=0, ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
print
print 'Field selection for a repeatIn', '='*40
get('account.invoice', level=0, ending=['one2many','many2many'], recur=['many2one'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,724,470,000,264,365,000 | 31.7 | 115 | 0.627676 | false |
aldebjer/pysim | doc/conf.py | 1 | 10355 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PySim documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 13:23:12 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from pysim import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
# 'sphinx.ext.imgmath',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySim'
copyright = '2014-2016, SSPA Sweden AB'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySimdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PySim.tex', 'PySim Documentation',
'Linus Aldebjer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysim', 'PySim Documentation',
['Linus Aldebjer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PySim', 'PySim Documentation',
'Linus Aldebjer', 'PySim', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'PySim'
epub_author = 'Linus Aldebjer'
epub_publisher = 'Linus Aldebjer'
epub_copyright = '2014-2016, SSPA Sweden AB'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'PySim'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| bsd-3-clause | -8,412,391,684,831,758,000 | 29.636095 | 80 | 0.706036 | false |
yonglehou/pybrain | pybrain/rl/environments/shipsteer/viewer.py | 25 | 12969 | from __future__ import print_function
__author__ = 'Frank Sehnke, [email protected]'
#@PydevCodeAnalysisIgnore
#########################################################################
# OpenGL viewer for the FlexCube Environment
#
# The FlexCube Environment is a Mass-Spring-System composed of 8 mass points.
# These resemble a cube with flexible edges.
#
# This viewer uses an UDP connection found in tools/networking/udpconnection.py
#
# The viewer recieves the position matrix of the 8 masspoints and the center of gravity.
# With this information it renders a Glut based 3d visualization of teh FlexCube
#
# Options:
# - serverIP: The ip of the server to which the viewer should connect
# - ownIP: The IP of the computer running the viewer
# - port: The starting port (2 adjacent ports will be used)
#
# Saving the images is possible by setting self.savePics=True.
# Changing the point and angle of view is possible by using the mouse
# while button 1 or 2 is pressed.
#
# Requirements: OpenGL
#
#########################################################################
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLE import *
from OpenGL.GLU import *
import sys
from time import sleep
from scipy import ones, array, cos, sin
from pybrain.tools.networking.udpconnection import UDPClient
class FlexCubeRenderer(object):
#Options: ServerIP(default:localhost), OwnIP(default:localhost), Port(default:21560)
def __init__(self, servIP="127.0.0.1", ownIP="127.0.0.1", port="21580"):
self.oldScreenValues = None
self.view = 0
self.worldRadius = 400
# Start of mousepointer
self.lastx = 0
self.lasty = 15
self.lastz = 300
self.zDis = 1
# Start of cube
self.cube = [0.0, 0.0, 0.0]
self.bmpCount = 0
self.actCount = 0
self.calcPhysics = 0
self.newPic = 1
self.picCount = 0
self.sensors = [0.0, 0.0, 0.0]
self.centerOfGrav = array([0.0, 5.0, 0.0])
self.savePics = False
self.drawCounter = 0
self.fps = 50
self.dt = 1.0 / float(self.fps)
self.step = 0
self.client = UDPClient(servIP, ownIP, port)
# If self.savePics=True this method saves the produced images
def saveTo(self, filename, format="JPEG"):
import Image # get PIL's functionality...
width, height = 800, 600
glPixelStorei(GL_PACK_ALIGNMENT, 1)
data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)
image = Image.fromstring("RGB", (width, height), data)
image = image.transpose(Image.FLIP_TOP_BOTTOM)
image.save(filename, format)
print(('Saved image to ', filename))
return image
# the render method containing the Glut mainloop
def _render(self):
# Call init: Parameter(Window Position -> x, y, height, width)
self.init_GL(self, 300, 300, 800, 600)
self.quad = gluNewQuadric()
glutMainLoop()
# The Glut idle function
def drawIdleScene(self):
#recive data from server and update the points of the cube
try: self.sensors = self.client.listen(self.sensors)
except: pass
if self.sensors == ["r", "r", "r"]: self.centerOfGrav = array([0.0, 5.0, 0.0])
else:
self.step += 1
a = self.sensors[0] / 360.0 * 3.1428
dir = array([cos(a), 0.0, -sin(a)])
self.centerOfGrav += self.sensors[2] * dir * 0.02
self.drawScene()
if self.savePics:
self.saveTo("./screenshots/image_jump" + repr(10000 + self.picCount) + ".jpg")
self.picCount += 1
else: sleep(self.dt)
def drawScene(self):
        ''' This method describes the complete scene.'''
# clear the buffer
if self.zDis < 10: self.zDis += 0.25
if self.lastz > 100: self.lastz -= self.zDis
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
# Point of view
glRotatef(self.lastx, 0.0, 1.0, 0.0)
glRotatef(self.lasty, 1.0, 0.0, 0.0)
#glRotatef(15, 0.0, 0.0, 1.0)
# direction of view is aimed to the center of gravity of the cube
glTranslatef(-self.centerOfGrav[0], -self.centerOfGrav[1] - 50.0, -self.centerOfGrav[2] - self.lastz)
#Objects
#Massstab
for lk in range(41):
if float(lk - 20) / 10.0 == (lk - 20) / 10:
glColor3f(0.75, 0.75, 0.75)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -30)
quad = gluNewQuadric()
gluCylinder(quad, 2, 2, 60, 4, 1)
glPopMatrix()
else:
if float(lk - 20) / 5.0 == (lk - 20) / 5:
glColor3f(0.75, 0.75, 0.75)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -15.0)
quad = gluNewQuadric()
gluCylinder(quad, 1, 1, 30, 4, 1)
glPopMatrix()
else:
glColor3f(0.75, 0.75, 0.75)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -7.5)
quad = gluNewQuadric()
gluCylinder(quad, 0.5, 0.5, 15, 4, 1)
glPopMatrix()
# Floor
tile = self.worldRadius / 40.0
glEnable (GL_BLEND)
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glColor3f(0.8, 0.8, 0.5)
glPushMatrix()
glTranslatef(0.0, -3.0, 0.0)
glBegin(GL_QUADS)
glNormal(0.0, 1.0, 0.0)
glVertex3f(-self.worldRadius, 0.0, -self.worldRadius)
glVertex3f(-self.worldRadius, 0.0, self.worldRadius)
glVertex3f(self.worldRadius, 0.0, self.worldRadius)
glVertex3f(self.worldRadius, 0.0, -self.worldRadius)
glEnd()
glPopMatrix()
#Water
for xF in range(40):
for yF in range(40):
if float(xF + yF) / 2.0 == (xF + yF) / 2: glColor4f(0.7, 0.7, 1.0, 0.5)
else: glColor4f(0.9, 0.9, 1.0, 0.5)
glPushMatrix()
glTranslatef(0.0, -0.03, 0.0)
glBegin(GL_QUADS)
glNormal(0.5 + sin(float(xF) + float(self.step) / 4.0) * 0.5, 0.5 + cos(float(xF) + float(self.step) / 4.0) * 0.5, 0.0)
for i in range(2):
for k in range(2):
glVertex3f((i + xF - 20) * tile, sin(float(xF + i) + float(self.step) / 4.0) * 3.0, ((k ^ i) + yF - 20) * tile)
glEnd()
glPopMatrix()
self.ship()
# swap the buffer
glutSwapBuffers()
def ship(self):
glColor3f(0.4, 0.1, 0.2)
glPushMatrix()
glTranslate(self.centerOfGrav[0] + 14, self.centerOfGrav[1], self.centerOfGrav[2])
glRotatef(180 - self.sensors[0], 0.0, 1.0, 0.0)
self.cuboid(0, 0, 0, 20, 5, 5)
#bow of ship
glBegin(GL_TRIANGLES)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 5, 5]), self.points2Vector([-5, 6, 2.5], [0, 5, 0])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 5, 0), glVertex3f(0, 5, 5)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 0, 5]), self.points2Vector([-5, 6, 2.5], [0, 5, 5])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 0, 5), glVertex3f(0, 5, 5)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 0, 0]), self.points2Vector([-5, 6, 2.5], [0, 0, 5])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 0, 5), glVertex3f(0, 0, 0)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 5, 0]), self.points2Vector([-5, 6, 2.5], [0, 0, 0])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 0, 0), glVertex3f(0, 5, 0)
glEnd()
# stern
glPushMatrix()
glRotatef(-90, 1.0, 0.0, 0.0)
glTranslatef(15, -2.5, 0)
gluCylinder(self.quad, 2.5, 2.5, 5, 10, 1)
glTranslatef(0, 0, 5)
gluDisk(self.quad, 0, 2.5, 10, 1)
glPopMatrix()
# deck
if abs(self.sensors[0]) < 5.0: reward = (self.sensors[2] + 10.0) / 50.0
else: reward = 0.2
glColor3f(1.0 - reward, reward, 0)
self.cuboid(5, 5, 1, 10, 8, 4)
glPushMatrix()
glRotatef(-90, 1.0, 0.0, 0.0)
glTranslatef(13, -2.5, 5)
glColor3f(1, 1, 1)
gluCylinder(self.quad, 1, 0.8, 5, 20, 1)
glPopMatrix()
glPopMatrix()
def cuboid(self, x0, y0, z0, x1, y1, z1):
glBegin(GL_QUADS)
glNormal(0, 0, 1)
glVertex3f(x0, y0, z1); glVertex3f(x0, y1, z1); glVertex3f(x1, y1, z1); glVertex3f(x1, y0, z1) #front
glNormal(-1, 0, 0)
glVertex3f(x0, y0, z0); glVertex3f(x0, y0, z1); glVertex3f(x0, y1, z1); glVertex3f(x0, y1, z0) # left
glNormal(0, -1, 0)
glVertex3f(x0, y0, z0); glVertex3f(x0, y0, z1); glVertex3f(x1, y0, z1); glVertex3f(x1, y0, z0) # bottom
glNormal(0, 0, -1)
glVertex3f(x0, y0, z0); glVertex3f(x1, y0, z0); glVertex3f(x1, y1, z0); glVertex3f(x0, y1, z0) # back
glNormal(0, 1, 0)
glVertex3f(x0, y1, z0); glVertex3f(x1, y1, z0); glVertex3f(x1, y1, z1); glVertex3f(x0, y1, z1) # top
glNormal(1, 0, 0)
glVertex3f(x1, y0, z0); glVertex3f(x1, y0, z1); glVertex3f(x1, y1, z1); glVertex3f(x1, y1, z0) # right
glEnd()
def calcNormal(self, xVector, yVector):
result = [0, 0, 0]
result[0] = xVector[1] * yVector[2] - yVector[1] * xVector[2]
result[1] = -xVector[0] * yVector[2] + yVector[0] * xVector[2]
result[2] = xVector[0] * yVector[1] - yVector[0] * xVector[1]
return [result[0], result[1], result[2]]
def points2Vector(self, startPoint, endPoint):
result = [0, 0, 0]
result[0] = endPoint[0] - startPoint[0]
result[1] = endPoint[1] - startPoint[1]
result[2] = endPoint[2] - startPoint[2]
return [result[0], result[1], result[2]]
def resizeScene(self, width, height):
'''Needed if window size changes.'''
if height == 0: # Prevent A Divide By Zero If The Window Is Too Small
height = 1
glViewport(0, 0, width, height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(width) / float(height), 0.1, 700.0)
glMatrixMode(GL_MODELVIEW)
def activeMouse(self, x, y):
#Returns mouse coordinates while any mouse button is pressed.
# store the mouse coordinate
if self.mouseButton == GLUT_LEFT_BUTTON:
self.lastx = x - self.xOffset
self.lasty = y - self.yOffset
if self.mouseButton == GLUT_RIGHT_BUTTON:
self.lastz = y - self.zOffset
# redisplay
glutPostRedisplay()
def passiveMouse(self, x, y):
'''Returns mouse coordinates while no mouse button is pressed.'''
pass
def completeMouse(self, button, state, x, y):
#Returns mouse coordinates and which button was pressed resp. released.
self.mouseButton = button
if state == GLUT_DOWN:
self.xOffset = x - self.lastx
self.yOffset = y - self.lasty
self.zOffset = y - self.lastz
# redisplay
glutPostRedisplay()
#Initialise an OpenGL windows with the origin at x, y and size of height, width.
def init_GL(self, pyWorld, x, y, height, width):
# initialize GLUT
glutInit([])
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH)
glutInitWindowSize(height, width)
glutInitWindowPosition(x, y)
glutCreateWindow("The Curious Cube")
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 0.0)
glShadeModel(GL_SMOOTH)
glMatrixMode(GL_MODELVIEW)
# initialize lighting */
glLightfv(GL_LIGHT0, GL_DIFFUSE, [1, 1, 1, 1.0])
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [1.0, 1.0, 1.0, 1.0])
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
#
glColorMaterial(GL_FRONT, GL_DIFFUSE)
glEnable(GL_COLOR_MATERIAL)
# Automatic vector normalise
glEnable(GL_NORMALIZE)
### Instantiate the virtual world ###
glutDisplayFunc(pyWorld.drawScene)
glutMotionFunc(pyWorld.activeMouse)
glutMouseFunc(pyWorld.completeMouse)
glutReshapeFunc(pyWorld.resizeScene)
glutIdleFunc(pyWorld.drawIdleScene)
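# Illustrative sketch (not part of the original module): launching the viewer
# from code with explicit options. The IP addresses and port below are
# placeholder assumptions; see the header comment for what each option means.
def _example_launch():
    viewer = FlexCubeRenderer(servIP="127.0.0.1", ownIP="127.0.0.1",
                              port="21580")
    viewer.savePics = False  # set True to save each rendered frame as an image
    viewer._render()         # enters the GLUT main loop and blocks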
if __name__ == '__main__':
s = sys.argv[1:]
r = FlexCubeRenderer(*s)
r._render()
| bsd-3-clause | 2,205,941,729,990,380,000 | 38.539634 | 135 | 0.560645 | false |
Phonemetra/TurboCoin | test/functional/rpc_deprecated.py | 1 | 1168 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import TurbocoinTestFramework
# from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(TurbocoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], []]
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
# RPC methods with and without the -deprecatedrpc flags. For example:
#
# In set_test_params:
# self.extra_args = [[], ["-deprecatedrpc=generate"]]
#
# In run_test:
# self.log.info("Test generate RPC")
# assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
# self.nodes[1].generate(1)
self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| mit | 4,133,175,544,555,493,400 | 39.275862 | 117 | 0.663527 | false |
storborg/axibot | axibot/colors.py | 1 | 1166 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from operator import itemgetter
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
pen_sets = {
'precise-v5': {
'black': (59, 59, 59),
'blue': (61, 93, 134),
'red': (138, 56, 60),
'green': (52, 126, 101),
'purple': (93, 90, 179),
'lightblue': (69, 153, 189),
'pink': (225, 87, 146),
}
}
def rgb_to_lab(rgb):
rgb_color = sRGBColor(rgb[0], rgb[1], rgb[2])
lab_color = convert_color(rgb_color, LabColor)
return lab_color.lab_l, lab_color.lab_a, lab_color.lab_b
def perceptual_distance(a, b):
a = rgb_to_lab(a)
b = rgb_to_lab(b)
return math.sqrt((b[2] - a[2])**2 +
(b[1] - a[1])**2 +
(b[0] - a[0])**2)
def find_pen_match(color, pen_set):
scores = {}
for pen, pen_color in pen_sets[pen_set].items():
scores[pen] = perceptual_distance(color, pen_color)
    scores = sorted(scores.items(), key=itemgetter(1))
return scores[0]
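# Illustrative usage sketch (the RGB value below is an arbitrary example):
# pick the pen from the "precise-v5" set that is perceptually closest to a
# given colour, together with its CIELAB distance.
def _example_match():
    pen, distance = find_pen_match((61, 93, 134), 'precise-v5')
    return pen, distance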
| gpl-2.0 | 3,535,368,727,434,133,000 | 26.116279 | 66 | 0.56175 | false |
fossoult/odoo | openerp/__init__.py | 235 | 3586 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
evented = True
# Is the server running in pefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
"""
Return the model registry for the given database, or the database mentioned
on the current thread. If the registry does not exist yet, it is created on
the fly.
"""
if database_name is None:
import threading
database_name = threading.currentThread().dbname
return modules.registry.RegistryManager.get(database_name)
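# Illustrative usage (assumes a database named "demo" exists and its registry
# has been initialised): registry("demo") returns that database's model
# registry, while registry() falls back to the current thread's database.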
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,611,521,554,835,735,000 | 34.50495 | 79 | 0.554657 | false |
karllessard/tensorflow | tensorflow/python/kernel_tests/cumulative_logsumexp_test.py | 15 | 4359 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for cumulative_logsumexp op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CumulativeLogsumexpTest(test.TestCase):
valid_dtypes = [dtypes.float32, dtypes.float64]
def _computeLogSumExp(self, x, **kwargs):
result_naive = math_ops.cumsum(math_ops.exp(x), **kwargs)
result_fused = math_ops.exp(math_ops.cumulative_logsumexp(x, **kwargs))
return result_naive, result_fused
def _testLogSumExp(self, x, dtype=dtypes.float32, use_gpu=False, **kwargs):
with self.cached_session(use_gpu=use_gpu):
x = ops.convert_to_tensor(x, dtype=dtype)
result_naive, result_fused = self.evaluate(
self._computeLogSumExp(x, **kwargs))
self.assertAllClose(result_naive, result_fused)
def _testLogSumExpAllArgs(self, x, axis=0, use_gpu=False):
for dtype in self.valid_dtypes:
for reverse in (True, False):
for exclusive in (True, False):
self._testLogSumExp(
x, dtype=dtype, use_gpu=use_gpu,
reverse=reverse, exclusive=exclusive,
axis=axis)
def testMinusInfinity(self):
x = np.log([0., 0., 1., 1., 1., 1., 0., 0.])
self._testLogSumExpAllArgs(x, use_gpu=False)
self._testLogSumExpAllArgs(x, use_gpu=True)
def test1D(self):
x = np.arange(10) / 10.0 - 0.5
self._testLogSumExpAllArgs(x, use_gpu=False)
self._testLogSumExpAllArgs(x, use_gpu=True)
def test2D(self):
x = np.reshape(np.arange(20) / 20.0 - 0.5, (2, 10))
for axis in (-2, -1, 0, 1):
self._testLogSumExpAllArgs(x, axis=axis, use_gpu=False)
self._testLogSumExpAllArgs(x, axis=axis, use_gpu=True)
def _testGradient(self, x, use_gpu=False, **kwargs):
with self.cached_session(use_gpu=use_gpu):
x = ops.convert_to_tensor(x, dtype=dtypes.float64)
grad_naive_theoretical, _ = gradient_checker_v2.compute_gradient(
lambda y: math_ops.cumsum(math_ops.exp(y), **kwargs), [x])
grad_fused_theoretical, _ = gradient_checker_v2.compute_gradient(
lambda y: math_ops.exp(math_ops.cumulative_logsumexp(y, **kwargs)),
[x])
self.assertAllClose(grad_fused_theoretical, grad_naive_theoretical)
def testGradient(self):
for reverse in (True, False):
for exclusive in (True, False):
x = np.arange(10) / 10.0 - 0.5
self._testGradient(x, use_gpu=False,
reverse=reverse, exclusive=exclusive)
self._testGradient(x, use_gpu=True,
reverse=reverse, exclusive=exclusive)
def _logSumExpMap(self, x):
return map_fn.map_fn(
lambda i: math_ops.reduce_logsumexp(x[:i + 1]),
math_ops.range(array_ops.shape(x)[0]),
dtype=x.dtype)
def test1DLarge(self):
# This test ensures that the operation is correct even when the naive
# implementation would overflow.
x_np = np.arange(20) * 20.0
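    # exp(380.) is on the order of 1e165, far beyond float32's maximum
    # (~3.4e38), so the naive cumsum(exp(x)) path would overflow to inf here.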
for use_gpu in (True, False):
with self.cached_session(use_gpu=use_gpu):
x_tf = ops.convert_to_tensor(x_np, dtype=dtypes.float32)
result_fused = self.evaluate(math_ops.cumulative_logsumexp(x_tf))
result_map = self.evaluate(self._logSumExpMap(x_tf))
self.assertAllClose(result_fused, result_map)
if __name__ == '__main__':
test.main()
| apache-2.0 | -6,845,386,402,650,338,000 | 35.630252 | 80 | 0.662767 | false |
ssvsergeyev/ZenPacks.zenoss.AWS | src/boto/tests/unit/beanstalk/test_exception.py | 114 | 2085 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.beanstalk.exception import simple
from tests.compat import unittest
class FakeError(object):
def __init__(self, code, status, reason, body):
self.code = code
self.status = status
self.reason = reason
self.body = body
class TestExceptions(unittest.TestCase):
def test_exception_class_names(self):
# Create exception from class name
error = FakeError('TooManyApplications', 400, 'foo', 'bar')
exception = simple(error)
self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
# Create exception from class name + 'Exception' as seen from the
# live service today
error = FakeError('TooManyApplicationsException', 400, 'foo', 'bar')
exception = simple(error)
self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
# Make sure message body is present
self.assertEqual(exception.message, 'bar')
| gpl-2.0 | -9,140,496,328,198,896,000 | 41.55102 | 77 | 0.717986 | false |
jnewland/home-assistant | homeassistant/components/wemo/binary_sensor.py | 7 | 4329 | """Support for WeMo binary sensors."""
import asyncio
import logging
import async_timeout
import requests
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.exceptions import PlatformNotReady
from . import SUBSCRIPTION_REGISTRY
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Register discovered WeMo binary sensors."""
from pywemo import discovery
if discovery_info is not None:
location = discovery_info['ssdp_description']
mac = discovery_info['mac_address']
try:
device = discovery.device_from_description(location, mac)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as err:
_LOGGER.error('Unable to access %s (%s)', location, err)
raise PlatformNotReady
if device:
add_entities([WemoBinarySensor(hass, device)])
class WemoBinarySensor(BinarySensorDevice):
"""Representation a WeMo binary sensor."""
def __init__(self, hass, device):
"""Initialize the WeMo sensor."""
self.wemo = device
self._state = None
self._available = True
self._update_lock = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo sensor."""
_LOGGER.debug("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(
self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_schedule_update_ha_state()
async def async_added_to_hass(self):
"""Wemo sensor added to HASS."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = SUBSCRIPTION_REGISTRY
await self.hass.async_add_executor_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
        WeMo has aggressive retry logic that can sometimes take over a
        minute to return. If we don't get a state within 5 seconds, assume the
        WeMo sensor is unreachable. If the update goes through, the sensor will
        be made available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning('Lost connection to %s', self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
def _update(self, force_update=True):
"""Update the sensor state."""
try:
self._state = self.wemo.get_state(force_update)
if not self._available:
_LOGGER.info('Reconnected to %s', self.name)
self._available = True
except AttributeError as err:
_LOGGER.warning("Could not update status for %s (%s)",
self.name, err)
self._available = False
@property
def unique_id(self):
"""Return the id of this WeMo sensor."""
return self._serialnumber
@property
def name(self):
"""Return the name of the service if any."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def available(self):
"""Return true if sensor is available."""
return self._available
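# Added commentary (not part of the upstream integration): state reaches this
# entity two ways -- pushed via _subscription_callback when the device emits an
# event, and pulled via async_update on the normal polling cycle. Both paths
# funnel through _async_locked_update, so only one pywemo call runs at a time
# per entity; _available flips to False when an update times out or raises and
# back to True on the next successful refresh.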
| apache-2.0 | 3,448,249,128,982,904,300 | 32.820313 | 78 | 0.624163 | false |
payet-s/pyrser | pyrser/directives/ignore.py | 2 | 1577 | from pyrser import meta, parsing
@meta.rule(parsing.Parser, "Base.ignore_cxx")
def ignore_cxx(self) -> bool:
"""Consume comments and whitespace characters."""
self._stream.save_context()
while not self.read_eof():
idxref = self._stream.index
if self._stream.peek_char in " \t\v\f\r\n":
while (not self.read_eof()
and self._stream.peek_char in " \t\v\f\r\n"):
self._stream.incpos()
if self.peek_text("//"):
while not self.read_eof() and not self.peek_char("\n"):
self._stream.incpos()
if not self.read_char("\n") and self.read_eof():
return self._stream.validate_context()
if self.peek_text("/*"):
while not self.read_eof() and not self.peek_text("*/"):
self._stream.incpos()
if not self.read_text("*/") and self.read_eof():
return self._stream.restore_context()
if idxref == self._stream.index:
break
return self._stream.validate_context()
@meta.directive("ignore")
class Ignore(parsing.DirectiveWrapper):
def begin(self, parser, convention: str):
if convention == "null":
parser.push_ignore(parsing.Parser.ignore_null)
if convention == "C/C++":
parser.push_ignore(parsing.Parser.ignore_cxx)
if convention == "blanks":
parser.push_ignore(parsing.Parser.ignore_blanks)
return True
def end(self, parser, convention: str):
parser.pop_ignore()
return True
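# Usage sketch (added for illustration; not part of the original module). The
# @ignore directive swaps the ignore convention for the scope it wraps, but the
# same switch can be done by hand on any parsing.Parser (or subclass) instance:
#   parser.push_ignore(parsing.Parser.ignore_cxx)   # skip blanks and C/C++ comments
#   ...                                             # run parsing rules here
#   parser.pop_ignore()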
| gpl-3.0 | 8,555,063,983,756,145,000 | 36.547619 | 67 | 0.571972 | false |
TheMOOCAgency/edx-platform | openedx/core/djangoapps/content/block_structure/signals.py | 13 | 1230 | """
Signal handlers for invalidating cached data.
"""
from django.conf import settings
from django.dispatch.dispatcher import receiver
from xmodule.modulestore.django import SignalHandler
from .api import clear_course_from_cache
from .tasks import update_course_in_cache
@receiver(SignalHandler.course_published)
def _listen_for_course_publish(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been published in the module
store and creates/updates the corresponding cache entry.
"""
clear_course_from_cache(course_key)
# The countdown=0 kwarg ensures the call occurs after the signal emitter
# has finished all operations.
update_course_in_cache.apply_async(
[unicode(course_key)],
countdown=settings.BLOCK_STRUCTURES_SETTINGS['BLOCK_STRUCTURES_COURSE_PUBLISH_TASK_DELAY'],
)
@receiver(SignalHandler.course_deleted)
def _listen_for_course_delete(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been deleted from the
module store and invalidates the corresponding cache entry if one
exists.
"""
clear_course_from_cache(course_key)
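# Added note (not part of the original module): in tests these receivers can be
# driven directly through the Django signal API, e.g.
#   SignalHandler.course_published.send(sender=None, course_key=course_key)
# which clears the cached structure and schedules update_course_in_cache with
# the configured BLOCK_STRUCTURES_COURSE_PUBLISH_TASK_DELAY countdown.
# (`course_key` here is a placeholder for an opaque CourseKey value.)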
| agpl-3.0 | -3,818,195,103,858,472,400 | 33.166667 | 99 | 0.742276 | false |
letaureau/b-tk.core | Testing/Python/SeparateKnownVirtualMarkersFilterTest.py | 4 | 18217 | import btk
import unittest
import _TDDConfigure
import numpy
class SeparateKnownVirtualMarkersFilterTest(unittest.TestCase):
def test_Constructor(self):
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
labels = skvm.GetVirtualReferenceFrames()
num = 19
self.assertEqual(labels.size(), num)
it = labels.begin()
if (labels.size() >= num):
# HED
self.assertEqual(it.value().Origin, 'HEDO')
self.assertEqual(it.value().Axis1, 'HEDA')
self.assertEqual(it.value().Axis2, 'HEDL')
self.assertEqual(it.value().Axis3, 'HEDP')
it.incr()
# LCL
self.assertEqual(it.value().Origin, 'LCLO')
self.assertEqual(it.value().Axis1, 'LCLA')
self.assertEqual(it.value().Axis2, 'LCLL')
self.assertEqual(it.value().Axis3, 'LCLP')
it.incr()
# LFE
self.assertEqual(it.value().Origin, 'LFEO')
self.assertEqual(it.value().Axis1, 'LFEA')
self.assertEqual(it.value().Axis2, 'LFEL')
self.assertEqual(it.value().Axis3, 'LFEP')
it.incr()
# LFO
self.assertEqual(it.value().Origin, 'LFOO')
self.assertEqual(it.value().Axis1, 'LFOA')
self.assertEqual(it.value().Axis2, 'LFOL')
self.assertEqual(it.value().Axis3, 'LFOP')
it.incr()
# LHN
self.assertEqual(it.value().Origin, 'LHNO')
self.assertEqual(it.value().Axis1, 'LHNA')
self.assertEqual(it.value().Axis2, 'LHNL')
self.assertEqual(it.value().Axis3, 'LHNP')
it.incr()
# LHU
self.assertEqual(it.value().Origin, 'LHUO')
self.assertEqual(it.value().Axis1, 'LHUA')
self.assertEqual(it.value().Axis2, 'LHUL')
self.assertEqual(it.value().Axis3, 'LHUP')
it.incr()
# LRA
self.assertEqual(it.value().Origin, 'LRAO')
self.assertEqual(it.value().Axis1, 'LRAA')
self.assertEqual(it.value().Axis2, 'LRAL')
self.assertEqual(it.value().Axis3, 'LRAP')
it.incr()
# LTI
self.assertEqual(it.value().Origin, 'LTIO')
self.assertEqual(it.value().Axis1, 'LTIA')
self.assertEqual(it.value().Axis2, 'LTIL')
self.assertEqual(it.value().Axis3, 'LTIP')
it.incr()
# LTO
self.assertEqual(it.value().Origin, 'LTOO')
self.assertEqual(it.value().Axis1, 'LTOA')
self.assertEqual(it.value().Axis2, 'LTOL')
self.assertEqual(it.value().Axis3, 'LTOP')
it.incr()
# PEL
self.assertEqual(it.value().Origin, 'PELO')
self.assertEqual(it.value().Axis1, 'PELA')
self.assertEqual(it.value().Axis2, 'PELL')
self.assertEqual(it.value().Axis3, 'PELP')
it.incr()
# RCL
self.assertEqual(it.value().Origin, 'RCLO')
self.assertEqual(it.value().Axis1, 'RCLA')
self.assertEqual(it.value().Axis2, 'RCLL')
self.assertEqual(it.value().Axis3, 'RCLP')
it.incr()
# RFE
self.assertEqual(it.value().Origin, 'RFEO')
self.assertEqual(it.value().Axis1, 'RFEA')
self.assertEqual(it.value().Axis2, 'RFEL')
self.assertEqual(it.value().Axis3, 'RFEP')
it.incr()
# RFO
self.assertEqual(it.value().Origin, 'RFOO')
self.assertEqual(it.value().Axis1, 'RFOA')
self.assertEqual(it.value().Axis2, 'RFOL')
self.assertEqual(it.value().Axis3, 'RFOP')
it.incr()
# RHN
self.assertEqual(it.value().Origin, 'RHNO')
self.assertEqual(it.value().Axis1, 'RHNA')
self.assertEqual(it.value().Axis2, 'RHNL')
self.assertEqual(it.value().Axis3, 'RHNP')
it.incr()
# RHU
self.assertEqual(it.value().Origin, 'RHUO')
self.assertEqual(it.value().Axis1, 'RHUA')
self.assertEqual(it.value().Axis2, 'RHUL')
self.assertEqual(it.value().Axis3, 'RHUP')
it.incr()
# RRA
self.assertEqual(it.value().Origin, 'RRAO')
self.assertEqual(it.value().Axis1, 'RRAA')
self.assertEqual(it.value().Axis2, 'RRAL')
self.assertEqual(it.value().Axis3, 'RRAP')
it.incr()
# RTI
self.assertEqual(it.value().Origin, 'RTIO')
self.assertEqual(it.value().Axis1, 'RTIA')
self.assertEqual(it.value().Axis2, 'RTIL')
self.assertEqual(it.value().Axis3, 'RTIP')
it.incr()
# RTO
self.assertEqual(it.value().Origin, 'RTOO')
self.assertEqual(it.value().Axis1, 'RTOA')
self.assertEqual(it.value().Axis2, 'RTOL')
self.assertEqual(it.value().Axis3, 'RTOP')
it.incr()
# TRX
self.assertEqual(it.value().Origin, 'TRXO')
self.assertEqual(it.value().Axis1, 'TRXA')
self.assertEqual(it.value().Axis2, 'TRXL')
self.assertEqual(it.value().Axis3, 'TRXP')
it.incr()
labels2 = skvm.GetVirtualMarkers()
num = 2
self.assertEqual(labels2.size(), num)
it2 = labels2.begin()
if (labels2.size() >= num):
self.assertEqual(it2.value(), 'CentreOfMass')
it2.incr()
self.assertEqual(it2.value(), 'CentreOfMassFloor')
def test_DefaultLabels(self):
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample09/PlugInC3D.c3d')
reader.Update()
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
skvm.SetInput(reader.GetOutput().GetPoints())
skvm.Update()
# markers
points = skvm.GetOutput(0)
self.assertEqual(points.GetItemNumber(), 18)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'RKNE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIB'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RASI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTHI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RHEE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LKNE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RANK'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RCLA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTHI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LASI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'C7'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LANK'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'SACR'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LHEE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LCLA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIB');
# virtual used for axes
points = skvm.GetOutput(1)
self.assertEqual(points.GetItemNumber(), 36)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOP')
# other virtual markers
points = skvm.GetOutput(2)
self.assertEqual(points.GetItemNumber(), 0)
# other type of points
points = skvm.GetOutput(3)
self.assertEqual(points.GetItemNumber(), 32)
def test_DefaultLabelsAndPrefix(self):
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample04/sub_labels.c3d')
reader.Update()
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
skvm.SetInput(reader.GetOutput().GetPoints())
skvm.SetLabelPrefix('Matt:')
skvm.Update()
# markers
points = skvm.GetOutput(0)
self.assertEqual(points.GetItemNumber(), 50)
inc = 0
# virtual used for axes
points = skvm.GetOutput(1)
self.assertEqual(points.GetItemNumber(), 36)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOP')
# other virtual markers
points = skvm.GetOutput(2)
self.assertEqual(points.GetItemNumber(), 0)
# other type of points
points = skvm.GetOutput(3)
self.assertEqual(points.GetItemNumber(), 28)
def test_FromLabelsLists(self):
# virtual markers for frame axes
labels = ['LFE', 'LFO', 'LTI', 'LTO', 'RFE', 'RFO', 'RTI', 'RTO']
virtualMarkerLabelsAxes = btk.btkStringAxesList()
for i in range(0, len(labels)):
label = labels[i]
virtualMarkerLabelsAxes.push_back(btk.btkStringAxes(label + 'O', label + 'A', label + 'L', label + 'P'))
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample04/sub_labels.c3d')
reader.Update()
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
skvm.SetInput(reader.GetOutput().GetPoints())
skvm.SetLabelPrefix('Matt:')
skvm.AppendVirtualMarker('LKNE')
skvm.AppendVirtualMarker('RKNE')
skvm.SetVirtualReferenceFrames(virtualMarkerLabelsAxes)
skvm.Update()
# markers
points = skvm.GetOutput(0)
self.assertEqual(points.GetItemNumber(), 52)
inc = 0
# virtual used for axes
points = skvm.GetOutput(1)
self.assertEqual(points.GetItemNumber(), 32)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOP')
# other virtual markers
points = skvm.GetOutput(2)
self.assertEqual(points.GetItemNumber(), 2)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LKNE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RKNE')
# other type of points
points = skvm.GetOutput(3)
self.assertEqual(points.GetItemNumber(), 28)
| bsd-3-clause | -8,984,920,669,402,484,000 | 52.89645 | 116 | 0.602789 | false |
badreddinetahir/pwn_plug_sources | src/voiper/sulley/requests/sip_valid.py | 8 | 1257 | from sulley import *
s_initialize("INVITE_VALID")
s_static('\r\n'.join(['INVITE sip:[email protected] SIP/2.0',
'CSeq: 1 INVITE',
'Via: SIP/2.0/UDP 192.168.3.102:5068;branch=z9hG4bKlm4zshdowki1t8c7ep6j0yavq2ug5r3x;rport',
'From: "nnp" <sip:[email protected]>;tag=so08p5k39wuv1dczfnij7bet4l2m6hrq',
'Call-ID: rzxd6tm98v0eal1cifg2py7sj3wk54ub@ubuntu',
'To: <sip:[email protected]>',
'Max-Forwards: 70',
'Content-Type: application/sdp',
'\r\n',
'v=0',
'o=somegimp 1190505265 1190505265 IN IP4 192.168.3.101',
's=Opal SIP Session',
'i=some information string',
'u=http://unprotectedhex.com/someuri.htm',
'[email protected]',
'c=IN IP4 192.168.3.101',
'b=CT:8',
't=0 1',
'm=audio 5028 RTP/AVP 101 96 107 110 0 8',
'a=rtpmap:101 telephone-event/8000',
]))
################################################################################
s_initialize("CANCEL_VALID")
s_static('\r\n'.join(['CANCEL sip:[email protected] SIP/2.0',
'CSeq: 1 CANCEL',
'Via: SIP/2.0/UDP 192.168.3.102:5068;branch=z9hG4bKlm4zshdowki1t8c7ep6j0yavq2ug5r3x;rport',
'From: "nnp" <sip:[email protected]>;tag=so08p5k39wuv1dczfnij7bet4l2m6hrq',
'Call-ID: rzxd6tm98v0eal1cifg2py7sj3wk54ub@ubuntu',
'To: <sip:[email protected]>',
'Max-Forwards: 70',
'\r\n'
]))
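################################################################################
# Added note (not part of the original request definitions): after these
# s_initialize() blocks run, the requests live in sulley's global registry and
# can be rendered or chained into a fuzzing session, e.g. (hedged sketch):
#   s_get("INVITE_VALID").render()
#   s_get("CANCEL_VALID").render()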
| gpl-3.0 | -1,391,954,082,609,255,700 | 30.425 | 91 | 0.655529 | false |
donkirkby/django | tests/reverse_lookup/tests.py | 326 | 1675 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Poll, User
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
self.assertRaises(FieldError, Poll.objects.get,
choice__name__exact="This is the answer")
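# Added note (not part of the original tests): the lookups above imply that in
# the companion models.py Choice.poll is declared with
# related_name='poll_choice' and Choice.related_poll with
# related_name='related_choice', which is why the plain field name 'choice'
# raises FieldError in test_reverse_field_name_disallowed.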
| bsd-3-clause | -6,503,586,863,480,031,000 | 31.211538 | 77 | 0.602985 | false |
fivejjs/PTVS | Python/Tests/TestData/DjangoAnalysisTestApp/DjangoAnalysisTestApp/settings.py | 18 | 5537 | # Django settings for DjangoAnalysisTestApp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
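# Example (added for illustration, not part of the original settings): a
# minimal sqlite3 configuration would fill only the first two keys, e.g.
# 'ENGINE': 'django.db.backends.sqlite3' and 'NAME' set to the absolute path
# of the .db file; USER, PASSWORD, HOST and PORT can stay empty as above.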
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'DjangoAnalysisTestApp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'DjangoAnalysisTestApp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| apache-2.0 | -3,768,361,702,409,830,400 | 33.954545 | 101 | 0.666426 | false |
hgrif/ds-utils | dsutils/sklearn.py | 1 | 2913 | import numpy as np
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
def multiclass_roc_auc_score(y_true, y_score, label_binarizer=None, **kwargs):
"""Compute ROC AUC score for multiclass.
:param y_true: true multiclass predictions [n_samples]
:param y_score: multiclass scores [n_samples, n_classes]
:param label_binarizer: Binarizer to use (sklearn.preprocessing.LabelBinarizer())
:param kwargs: Additional keyword arguments for sklearn.metrics.roc_auc_score
:return: Multiclass ROC AUC score
"""
if label_binarizer is None:
label_binarizer = preprocessing.LabelBinarizer()
binarized_true = label_binarizer.fit_transform(y_true)
score = metrics.roc_auc_score(binarized_true, y_score, **kwargs)
return score
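# Usage sketch (added for illustration; not part of the original module):
#   y_true = ['cat', 'dog', 'bird', 'cat']   # labels, length n_samples
#   y_score = model.predict_proba(X)         # shape [n_samples, n_classes]
#   auc = multiclass_roc_auc_score(y_true, y_score, average='macro')
# `model` and `X` are placeholders; extra kwargs are passed straight through to
# sklearn.metrics.roc_auc_score.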
def split_train_test(y, do_split_stratified=True, **kwargs):
"""Get indexes to split y in train and test sets.
:param y: Labels of samples
:param do_split_stratified: Use StratifiedShuffleSplit (else ShuffleSplit)
:param kwargs: Keyword arguments StratifiedShuffleSplit or ShuffleSplit
:return: (train indexes, test indexes)
"""
if do_split_stratified:
data_splitter = cross_validation.StratifiedShuffleSplit(y, n_iter=1,
**kwargs)
else:
data_splitter = cross_validation.ShuffleSplit(y, n_iter=1, **kwargs)
train_ix, test_ix = data_splitter.__iter__().next()
return train_ix, test_ix
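# Usage sketch (added for illustration; not part of the original module):
#   train_ix, test_ix = split_train_test(y, test_size=0.2, random_state=0)
#   X_train, y_train = X[train_ix], y[train_ix]
#   X_test, y_test = X[test_ix], y[test_ix]
# `test_size` and `random_state` are forwarded to the underlying
# (Stratified)ShuffleSplit.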
class OrderedLabelEncoder(preprocessing.LabelEncoder):
"""Encode labels with value between 0 and n_classes-1 in specified order.
See also
--------
sklearn.preprocessing.LabelEncoder
"""
def __init__(self, classes):
self.classes_ = np.array(classes, dtype='O')
def fit(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def fit_transform(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.array(np.unique(y), dtype='O')
preprocessing.label._check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
transformed_y = np.zeros_like(y, dtype=int)
for i_class, current_class in enumerate(self.classes_):
transformed_y[np.array(y) == current_class] = i_class
        return transformed_y
| mit | -9,122,648,073,810,248,000 | 35.886076 | 85 | 0.640577 | false |