repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
LighthouseUK/koalacore | koalacore/config.py | 1 | 2449 | """
This configuration parser takes a platform argument and uses that for the sections of the config file
i.e. Production, Development, Testing
All you need to do is use the corresponding get methods for the data you need. The platform is automatically used
where applicable.
"""
from __future__ import absolute_import
import codecs
from datetime import datetime
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
__author__ = 'Matt Badger'
class CustomConfigParser(SafeConfigParser):
def __init__(self, platform, *args, **kwargs):
self.platform = platform
SafeConfigParser.__init__(self, *args, **kwargs)
def get(self, option, section=None, raw=False, vars=None):
if section is None:
section = self.platform
        return SafeConfigParser.get(self, section, option, raw=raw, vars=vars)
def getint(self, option, section=None):
return int(self.get(option=option, section=section))
def getfloat(self, option, section=None):
return float(self.get(option=option, section=section))
def getboolean(self, option, section=None):
v = self.get(section=section, option=option)
if v.lower() not in self._boolean_states:
            raise ValueError('Not a boolean: %s' % v)
return self._boolean_states[v.lower()]
def getdate(self, option, section=None, raw=False, vars=None):
if section is None:
section = self.platform
        value = SafeConfigParser.get(self, section, option, raw=raw, vars=vars)
return datetime.strptime(value, '%Y-%m-%d')
def getdatetime(self, option, section=None, raw=False, vars=None):
if section is None:
section = self.platform
        value = SafeConfigParser.get(self, section, option, raw=raw, vars=vars)
return datetime.strptime(value, '%Y-%m-%d %H%M%S')
def getlist(self, option, section=None, raw=False, vars=None):
if section is None:
section = self.platform
        setting = SafeConfigParser.get(self, section, option, raw=raw, vars=vars)
return setting.split(',')
def load_config(config_file_path, platform='Development', allow_no_value=True, **kwargs):
config_instance = CustomConfigParser(platform=platform, allow_no_value=allow_no_value, **kwargs)
with codecs.open(config_file_path, 'r', encoding='utf-8') as f:
config_instance.readfp(f)
return config_instance
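# Usage sketch (illustrative only, not part of the original module): assumes a
# hypothetical "settings.ini" with [Development]/[Production] sections containing
# the option names used below.
if __name__ == '__main__':
    config = load_config('settings.ini', platform='Development')
    print(config.get('timezone'))           # read from the [Development] section
    print(config.getboolean('debug'))
    print(config.getdate('launch_date'))    # parsed with '%Y-%m-%d'
    print(config.getlist('admins'))         # comma-separated option -> list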
| lgpl-3.0 | 8,982,907,238,179,946,000 | 36.676923 | 113 | 0.679053 | false |
isandlaTech/cohorte-demos | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/sleekxmpp/features/feature_session/session.py | 12 | 1398 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.stanza import Iq, StreamFeatures
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.features.feature_session import stanza
log = logging.getLogger(__name__)
class FeatureSession(BasePlugin):
name = 'feature_session'
description = 'RFC 3920: Stream Feature: Start Session'
dependencies = set()
stanza = stanza
def plugin_init(self):
self.xmpp.register_feature('session',
self._handle_start_session,
restart=False,
order=10001)
register_stanza_plugin(Iq, stanza.Session)
register_stanza_plugin(StreamFeatures, stanza.Session)
def _handle_start_session(self, features):
"""
Handle the start of the session.
Arguments:
feature -- The stream features element.
"""
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq.enable('session')
iq.send(now=True)
self.xmpp.features.add('session')
log.debug("Established Session")
self.xmpp.sessionstarted = True
self.xmpp.session_started_event.set()
self.xmpp.event('session_start')
| apache-2.0 | 5,106,323,120,280,622,000 | 24.888889 | 62 | 0.642346 | false |
zhukaixy/kbengine | kbe/res/scripts/common/Lib/distutils/tests/test_build_ext.py | 71 | 18085 | import sys
import os
from io import StringIO
import textwrap
from distutils.core import Distribution
from distutils.command.build_ext import build_ext
from distutils import sysconfig
from distutils.tests.support import (TempdirManager, LoggingSilencer,
copy_xxmodule_c, fixup_build_ext)
from distutils.extension import Extension
from distutils.errors import (
CompileError, DistutilsPlatformError, DistutilsSetupError,
UnknownFileError)
import unittest
from test import support
# http://bugs.python.org/issue4373
# Don't load the xx module more than once.
ALREADY_TESTED = False
class BuildExtTestCase(TempdirManager,
LoggingSilencer,
unittest.TestCase):
def setUp(self):
# Create a simple test environment
# Note that we're making changes to sys.path
super(BuildExtTestCase, self).setUp()
self.tmp_dir = self.mkdtemp()
self.sys_path = sys.path, sys.path[:]
sys.path.append(self.tmp_dir)
import site
self.old_user_base = site.USER_BASE
site.USER_BASE = self.mkdtemp()
from distutils.command import build_ext
build_ext.USER_BASE = site.USER_BASE
def test_build_ext(self):
global ALREADY_TESTED
copy_xxmodule_c(self.tmp_dir)
xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
xx_ext = Extension('xx', [xx_c])
dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]})
dist.package_dir = self.tmp_dir
cmd = build_ext(dist)
fixup_build_ext(cmd)
cmd.build_lib = self.tmp_dir
cmd.build_temp = self.tmp_dir
old_stdout = sys.stdout
if not support.verbose:
# silence compiler output
sys.stdout = StringIO()
try:
cmd.ensure_finalized()
cmd.run()
finally:
sys.stdout = old_stdout
if ALREADY_TESTED:
self.skipTest('Already tested in %s' % ALREADY_TESTED)
else:
ALREADY_TESTED = type(self).__name__
import xx
for attr in ('error', 'foo', 'new', 'roj'):
self.assertTrue(hasattr(xx, attr))
self.assertEqual(xx.foo(2, 5), 7)
self.assertEqual(xx.foo(13,15), 28)
self.assertEqual(xx.new().demo(), None)
if support.HAVE_DOCSTRINGS:
doc = 'This is a template module just for instruction.'
self.assertEqual(xx.__doc__, doc)
self.assertIsInstance(xx.Null(), xx.Null)
self.assertIsInstance(xx.Str(), xx.Str)
def tearDown(self):
# Get everything back to normal
support.unload('xx')
sys.path = self.sys_path[0]
sys.path[:] = self.sys_path[1]
import site
site.USER_BASE = self.old_user_base
from distutils.command import build_ext
build_ext.USER_BASE = self.old_user_base
super(BuildExtTestCase, self).tearDown()
def test_solaris_enable_shared(self):
dist = Distribution({'name': 'xx'})
cmd = build_ext(dist)
old = sys.platform
sys.platform = 'sunos' # fooling finalize_options
from distutils.sysconfig import _config_vars
old_var = _config_vars.get('Py_ENABLE_SHARED')
_config_vars['Py_ENABLE_SHARED'] = 1
try:
cmd.ensure_finalized()
finally:
sys.platform = old
if old_var is None:
del _config_vars['Py_ENABLE_SHARED']
else:
_config_vars['Py_ENABLE_SHARED'] = old_var
# make sure we get some library dirs under solaris
self.assertGreater(len(cmd.library_dirs), 0)
def test_user_site(self):
import site
dist = Distribution({'name': 'xx'})
cmd = build_ext(dist)
# making sure the user option is there
        options = [name for name, short, label in
cmd.user_options]
self.assertIn('user', options)
# setting a value
cmd.user = 1
# setting user based lib and include
lib = os.path.join(site.USER_BASE, 'lib')
incl = os.path.join(site.USER_BASE, 'include')
os.mkdir(lib)
os.mkdir(incl)
# let's run finalize
cmd.ensure_finalized()
# see if include_dirs and library_dirs
# were set
self.assertIn(lib, cmd.library_dirs)
self.assertIn(lib, cmd.rpath)
self.assertIn(incl, cmd.include_dirs)
def test_optional_extension(self):
# this extension will fail, but let's ignore this failure
# with the optional argument.
modules = [Extension('foo', ['xxx'], optional=False)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = build_ext(dist)
cmd.ensure_finalized()
self.assertRaises((UnknownFileError, CompileError),
cmd.run) # should raise an error
modules = [Extension('foo', ['xxx'], optional=True)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = build_ext(dist)
cmd.ensure_finalized()
cmd.run() # should pass
def test_finalize_options(self):
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
modules = [Extension('foo', ['xxx'], optional=False)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = build_ext(dist)
cmd.finalize_options()
from distutils import sysconfig
py_include = sysconfig.get_python_inc()
self.assertIn(py_include, cmd.include_dirs)
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
self.assertIn(plat_py_include, cmd.include_dirs)
# make sure cmd.libraries is turned into a list
# if it's a string
cmd = build_ext(dist)
cmd.libraries = 'my_lib, other_lib lastlib'
cmd.finalize_options()
self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib'])
# make sure cmd.library_dirs is turned into a list
# if it's a string
cmd = build_ext(dist)
cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep
cmd.finalize_options()
self.assertIn('my_lib_dir', cmd.library_dirs)
self.assertIn('other_lib_dir', cmd.library_dirs)
# make sure rpath is turned into a list
# if it's a string
cmd = build_ext(dist)
cmd.rpath = 'one%stwo' % os.pathsep
cmd.finalize_options()
self.assertEqual(cmd.rpath, ['one', 'two'])
# XXX more tests to perform for win32
# make sure define is turned into 2-tuples
# strings if they are ','-separated strings
cmd = build_ext(dist)
cmd.define = 'one,two'
cmd.finalize_options()
self.assertEqual(cmd.define, [('one', '1'), ('two', '1')])
# make sure undef is turned into a list of
# strings if they are ','-separated strings
cmd = build_ext(dist)
cmd.undef = 'one,two'
cmd.finalize_options()
self.assertEqual(cmd.undef, ['one', 'two'])
# make sure swig_opts is turned into a list
cmd = build_ext(dist)
cmd.swig_opts = None
cmd.finalize_options()
self.assertEqual(cmd.swig_opts, [])
cmd = build_ext(dist)
cmd.swig_opts = '1 2'
cmd.finalize_options()
self.assertEqual(cmd.swig_opts, ['1', '2'])
def test_check_extensions_list(self):
dist = Distribution()
cmd = build_ext(dist)
cmd.finalize_options()
#'extensions' option must be a list of Extension instances
self.assertRaises(DistutilsSetupError,
cmd.check_extensions_list, 'foo')
# each element of 'ext_modules' option must be an
# Extension instance or 2-tuple
exts = [('bar', 'foo', 'bar'), 'foo']
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
# first element of each tuple in 'ext_modules'
# must be the extension name (a string) and match
# a python dotted-separated name
exts = [('foo-bar', '')]
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
# second element of each tuple in 'ext_modules'
        # must be a dictionary (build info)
exts = [('foo.bar', '')]
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
# ok this one should pass
exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
'some': 'bar'})]
cmd.check_extensions_list(exts)
ext = exts[0]
self.assertIsInstance(ext, Extension)
# check_extensions_list adds in ext the values passed
# when they are in ('include_dirs', 'library_dirs', 'libraries'
# 'extra_objects', 'extra_compile_args', 'extra_link_args')
self.assertEqual(ext.libraries, 'foo')
self.assertFalse(hasattr(ext, 'some'))
# 'macros' element of build info dict must be 1- or 2-tuple
exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})]
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
exts[0][1]['macros'] = [('1', '2'), ('3',)]
cmd.check_extensions_list(exts)
self.assertEqual(exts[0].undef_macros, ['3'])
self.assertEqual(exts[0].define_macros, [('1', '2')])
def test_get_source_files(self):
modules = [Extension('foo', ['xxx'], optional=False)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = build_ext(dist)
cmd.ensure_finalized()
self.assertEqual(cmd.get_source_files(), ['xxx'])
def test_compiler_option(self):
# cmd.compiler is an option and
        # should not be overridden by a compiler instance
# when the command is run
dist = Distribution()
cmd = build_ext(dist)
cmd.compiler = 'unix'
cmd.ensure_finalized()
cmd.run()
self.assertEqual(cmd.compiler, 'unix')
def test_get_outputs(self):
tmp_dir = self.mkdtemp()
c_file = os.path.join(tmp_dir, 'foo.c')
self.write_file(c_file, 'void PyInit_foo(void) {}\n')
ext = Extension('foo', [c_file], optional=False)
dist = Distribution({'name': 'xx',
'ext_modules': [ext]})
cmd = build_ext(dist)
fixup_build_ext(cmd)
cmd.ensure_finalized()
self.assertEqual(len(cmd.get_outputs()), 1)
cmd.build_lib = os.path.join(self.tmp_dir, 'build')
cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')
# issue #5977 : distutils build_ext.get_outputs
# returns wrong result with --inplace
other_tmp_dir = os.path.realpath(self.mkdtemp())
old_wd = os.getcwd()
os.chdir(other_tmp_dir)
try:
cmd.inplace = 1
cmd.run()
so_file = cmd.get_outputs()[0]
finally:
os.chdir(old_wd)
self.assertTrue(os.path.exists(so_file))
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
self.assertTrue(so_file.endswith(ext_suffix))
so_dir = os.path.dirname(so_file)
self.assertEqual(so_dir, other_tmp_dir)
cmd.inplace = 0
cmd.compiler = None
cmd.run()
so_file = cmd.get_outputs()[0]
self.assertTrue(os.path.exists(so_file))
self.assertTrue(so_file.endswith(ext_suffix))
so_dir = os.path.dirname(so_file)
self.assertEqual(so_dir, cmd.build_lib)
# inplace = 0, cmd.package = 'bar'
build_py = cmd.get_finalized_command('build_py')
build_py.package_dir = {'': 'bar'}
path = cmd.get_ext_fullpath('foo')
# checking that the last directory is the build_dir
path = os.path.split(path)[0]
self.assertEqual(path, cmd.build_lib)
# inplace = 1, cmd.package = 'bar'
cmd.inplace = 1
other_tmp_dir = os.path.realpath(self.mkdtemp())
old_wd = os.getcwd()
os.chdir(other_tmp_dir)
try:
path = cmd.get_ext_fullpath('foo')
finally:
os.chdir(old_wd)
# checking that the last directory is bar
path = os.path.split(path)[0]
lastdir = os.path.split(path)[-1]
self.assertEqual(lastdir, 'bar')
def test_ext_fullpath(self):
ext = sysconfig.get_config_var('EXT_SUFFIX')
# building lxml.etree inplace
#etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
#etree_ext = Extension('lxml.etree', [etree_c])
#dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
dist = Distribution()
cmd = build_ext(dist)
cmd.inplace = 1
cmd.distribution.package_dir = {'': 'src'}
cmd.distribution.packages = ['lxml', 'lxml.html']
curdir = os.getcwd()
wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
self.assertEqual(wanted, path)
# building lxml.etree not inplace
cmd.inplace = 0
cmd.build_lib = os.path.join(curdir, 'tmpdir')
wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
self.assertEqual(wanted, path)
# building twisted.runner.portmap not inplace
build_py = cmd.get_finalized_command('build_py')
build_py.package_dir = {}
cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
path = cmd.get_ext_fullpath('twisted.runner.portmap')
wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner',
'portmap' + ext)
self.assertEqual(wanted, path)
# building twisted.runner.portmap inplace
cmd.inplace = 1
path = cmd.get_ext_fullpath('twisted.runner.portmap')
wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
self.assertEqual(wanted, path)
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
def test_deployment_target_default(self):
# Issue 9516: Test that, in the absence of the environment variable,
# an extension module is compiled with the same deployment target as
# the interpreter.
self._try_compile_deployment_target('==', None)
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
def test_deployment_target_too_low(self):
# Issue 9516: Test that an extension module is not allowed to be
# compiled with a deployment target less than that of the interpreter.
self.assertRaises(DistutilsPlatformError,
self._try_compile_deployment_target, '>', '10.1')
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
def test_deployment_target_higher_ok(self):
# Issue 9516: Test that an extension module can be compiled with a
# deployment target higher than that of the interpreter: the ext
# module may depend on some newer OS feature.
deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if deptarget:
# increment the minor version number (i.e. 10.6 -> 10.7)
deptarget = [int(x) for x in deptarget.split('.')]
deptarget[-1] += 1
deptarget = '.'.join(str(i) for i in deptarget)
self._try_compile_deployment_target('<', deptarget)
def _try_compile_deployment_target(self, operator, target):
orig_environ = os.environ
os.environ = orig_environ.copy()
self.addCleanup(setattr, os, 'environ', orig_environ)
if target is None:
if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
del os.environ['MACOSX_DEPLOYMENT_TARGET']
else:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')
with open(deptarget_c, 'w') as fp:
fp.write(textwrap.dedent('''\
#include <AvailabilityMacros.h>
int dummy;
#if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED
#else
#error "Unexpected target"
#endif
''' % operator))
# get the deployment target that the interpreter was built with
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
target = tuple(map(int, target.split('.')[0:2]))
# format the target value as defined in the Apple
# Availability Macros. We can't use the macro names since
# at least one value we test with will not exist yet.
if target[1] < 10:
# for 10.1 through 10.9.x -> "10n0"
target = '%02d%01d0' % target
else:
# for 10.10 and beyond -> "10nn00"
target = '%02d%02d00' % target
deptarget_ext = Extension(
'deptarget',
[deptarget_c],
extra_compile_args=['-DTARGET=%s'%(target,)],
)
dist = Distribution({
'name': 'deptarget',
'ext_modules': [deptarget_ext]
})
dist.package_dir = self.tmp_dir
cmd = build_ext(dist)
cmd.build_lib = self.tmp_dir
cmd.build_temp = self.tmp_dir
try:
old_stdout = sys.stdout
if not support.verbose:
# silence compiler output
sys.stdout = StringIO()
try:
cmd.ensure_finalized()
cmd.run()
finally:
sys.stdout = old_stdout
except CompileError:
self.fail("Wrong deployment target during compilation")
def test_suite():
return unittest.makeSuite(BuildExtTestCase)
if __name__ == '__main__':
support.run_unittest(test_suite())
| lgpl-3.0 | 3,797,777,132,041,692,700 | 36.059426 | 83 | 0.582195 | false |
AMICI-developer/AMICI | tests/generateTestConfig/example_calvetti.py | 3 | 1029 | #!/usr/bin/env python3
import sys
import numpy as np
from example import AmiciExample
class ExampleCalvetti(AmiciExample):
def __init__(self):
AmiciExample.__init__( self )
self.numX = 6
self.numP = 0
self.numK = 6
self.modelOptions['theta'] = []
self.modelOptions['kappa'] = [0.29, 0.74, 0.44, 0.08, 0.27, 0.18]
self.modelOptions['ts'] = np.linspace(0, 20, 201)
self.modelOptions['pscale'] = 0
self.solverOptions['atol'] = 1e-6
self.solverOptions['rtol'] = 1e-4
self.solverOptions['sens_ind'] = []
self.solverOptions['sensi'] = 0
self.solverOptions['sensi_meth'] = 1
def writeNoSensi(filename):
ex = ExampleCalvetti()
ex.writeToFile(filename, '/model_calvetti/nosensi/')
def main():
if len(sys.argv) < 2:
print("Error: Must provide output file as first and only argument.")
sys.exit(1)
filename = sys.argv[1]
writeNoSensi(filename)
if __name__ == "__main__":
main()
| bsd-2-clause | -887,145,872,941,134,700 | 22.386364 | 76 | 0.592809 | false |
m000/cliutils | dotfiles/j2_tests.py | 1 | 1327 | # -*- coding: utf-8 -*-
import os
import sys
assert sys.version_info >= (2,5), "Need at least Python 2.5."
if sys.version_info < (3,0):
from shutilwhich import which
else:
from shutil import which
### Tests ###########################################################
def exists(p):
''' Returns true if path p exists.
Tilde and shell variables in d are expanded before testing.
'''
if not p:
return False
else:
p = os.path.expandvars(os.path.expanduser(p))
return os.path.exists(p)
def dir(d):
''' Returns true if d is a directory.
Tilde and shell variables in d are expanded before testing.
'''
if not d:
return False
else:
d = os.path.expandvars(os.path.expanduser(d))
return os.path.isdir(d)
def file(f):
''' Returns true if f is a file.
Tilde and shell variables in f are expanded before testing.
'''
if not f:
return False
else:
f = os.path.expandvars(os.path.expanduser(f))
return os.path.isfile(f)
def installed(b):
''' Returns true if an executable named b exists in the current path.
b may also be a list of binaries.
'''
blist = b if isinstance(b, list) else [b,]
return all([which(b) for b in blist])
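# Usage sketch (illustrative only, not from this repo): one way these helpers could
# be wired up as custom Jinja2 tests; the template string below is an assumption.
if __name__ == '__main__':
    import jinja2
    env = jinja2.Environment()
    env.tests.update({'exists': exists, 'dir': dir, 'file': file, 'installed': installed})
    template = env.from_string("{% if 'git' is installed %}git is available{% endif %}")
    print(template.render())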
# vim:set tabstop=4 softtabstop=4 expandtab:
| mit | 4,960,320,225,315,289,000 | 26.645833 | 73 | 0.585531 | false |
varunkamra/kuma | vendor/packages/pygments/formatters/img.py | 78 | 18002 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
get_choice_opt, xrange
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
try:
from commands import getstatusoutput
except ImportError:
from subprocess import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
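# Usage sketch (illustrative only, not part of this module): rendering a snippet to a
# PNG with the ImageFormatter defined above; the snippet and output path are assumptions.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    with open('example.png', 'wb') as outfile:
        highlight('print("hello world")', PythonLexer(), ImageFormatter(line_numbers=True), outfile)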
| mpl-2.0 | 1,930,199,049,884,242,200 | 31.146429 | 84 | 0.546828 | false |
xulesc/algos | knn/scripts/exp3.py | 1 | 6922 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 23 12:12:31 2014
@author: anuj
"""
print(__doc__)
from time import time
import numpy as np
import pylab as pl
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import linear_model, datasets
from sklearn.decomposition import PCA
klas_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.klass'
data_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.num'
klasses = np.genfromtxt(klas_file, skip_header=1)
n_data = np.genfromtxt(data_file, usecols=range(2, 11), skip_header=1,delimiter=',', missing_values='?')
np.random.seed(42)
data = normalize(n_data[~np.isnan(n_data).any(axis=1)], axis = 0)
n_samples, n_features = [len(data), len(data[0])]
n_digits = len(np.unique(klasses))
labels = klasses[~np.isnan(n_data).any(axis=1)]
sample_size = int(10.0 * n_samples / 100)
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
#reduced_data = PCA(n_components=2).fit_transform(data)
#kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
#kmeans.fit(reduced_data)
#
## Step size of the mesh. Decrease to increase the quality of the VQ.
#h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].
#
## Plot the decision boundary. For that, we will assign a color to each
#x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
#y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
#xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
#
## Obtain labels for each point in mesh. Use last trained model.
#Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
#
## Put the result into a color plot
#Z = Z.reshape(xx.shape)
#pl.figure(1)
#pl.clf()
#pl.imshow(Z, interpolation='nearest',
# extent=(xx.min(), xx.max(), yy.min(), yy.max()),
# cmap=pl.cm.Paired,
# aspect='auto', origin='lower')
#
#pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
## Plot the centroids as a white X
#centroids = kmeans.cluster_centers_
#pl.scatter(centroids[:, 0], centroids[:, 1],
# marker='x', s=169, linewidths=3,
# color='w', zorder=10)
#pl.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
# 'Centroids are marked with white cross')
#pl.xlim(x_min, x_max)
#pl.ylim(y_min, y_max)
#pl.xticks(())
#pl.yticks(())
#pl.show()
###############################################################################
data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size=0.20, random_state=42)
neigh = KNeighborsClassifier(n_neighbors=5)
neigh.fit(data_train, labels_train)
#print neigh.score(data_test, labels_test)
pred = neigh.predict(data_test)
cm = confusion_matrix(labels_test, pred)
print(cm)
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.ylabel('True label')
pl.xlabel('Predicted label')
pl.show()
###############################################################################
klas_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.klass'
data_file = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.non_num'
data_file2 = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv.num'
klasses = np.genfromtxt(klas_file, skip_header=1)
n_data = np.genfromtxt(data_file, skip_header=1,delimiter=',',dtype='|S5')
n_data_num = np.genfromtxt(data_file2, usecols=range(2, 11), skip_header=1,delimiter=',', missing_values='?')
#n_data = n_data[~np.isnan(n_data).any(axis=1)]
exc = np.isnan(n_data_num).any(axis=1)
n_data_num_n = normalize(n_data_num[~exc], axis = 0)
labels = klasses[~exc]
n_data2 = n_data[~exc]
n_data2 = [x[:len(x) - 1] for x in n_data2]
n_data2 = np.transpose(n_data2)
le = preprocessing.LabelEncoder()
n_data3 = [le.fit(d).transform(d) for d in n_data2]
##############
#f_data = np.transpose(n_data3)
f_data = n_data_num_n
#for x in np.transpose(n_data_num_n):
# f_data.append(x)
#f_data = np.transpose(f_data)
##############
data_train, data_test, labels_train, labels_test = train_test_split(f_data, labels, test_size=0.20, random_state=42)
neigh = KNeighborsClassifier(n_neighbors=1)
#neigh = MultinomialNB()
print('%d:%d\n' %(sum(labels_train),len(labels_train)))
neigh.fit(data_train, labels_train)
#print neigh.score(data_test, labels_test)
pred = neigh.predict(data_test)
print('%d:%d:%d:%d\n' %(sum(labels_test),len(labels_test),sum(pred),len(pred)))
cm = confusion_matrix(labels_test, pred)
print(cm)
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.ylabel('True label')
pl.xlabel('Predicted label')
pl.show()
##############
###############################################################################
f = '/home/anuj/workspace.custom/assignment/data/dataset_diabetes/diabetic_data.csv'
d1 = np.genfromtxt(f, delimiter = ',', names=True)
names = d1.dtype.names
d = np.genfromtxt(f, delimiter = ',', dtype='|S5', skip_header=1)
dc = np.transpose(d)
for x, name in zip(dc, names):
    print('%s: %d' % (name, len(np.unique(x))))
##
| gpl-3.0 | -6,247,069,177,228,122,000 | 35.819149 | 116 | 0.648223 | false |
w3nd1go/android_external_skia | tools/skp/page_sets/skia_theverge_desktop.py | 26 | 1224 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
page_set=page_set,
credentials_path='data/credentials.json')
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/skia_theverge_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(15)
class SkiaThevergeDesktopPageSet(page_set_module.PageSet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaThevergeDesktopPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/skia_theverge_desktop.json')
urls_list = [
# Why: from robertphillips.
'http://theverge.com/',
]
for url in urls_list:
self.AddUserStory(SkiaBuildbotDesktopPage(url, self))
| bsd-3-clause | 8,595,825,506,550,447,000 | 28.853659 | 74 | 0.703431 | false |
bewest/openxshareble | openxshareble/app.py | 1 | 5980 |
import Adafruit_BluefruitLE
from Adafruit_BluefruitLE.services import UART as OriginalUART
# from ble import uart
from ble.uart import UART
from ble.readdata import Device
import time
import atexit
import logging
log = logging.getLogger(__name__)
class App (object):
""" A high level application object.
Any application needing to talk to Dexcom G4 + Share will need
to perform operations to setup the ble data transport. This class
mixes the UART, ble code, and provides helpful prolog and epilog
routines that run before and after main, respectively.
"""
def __init__ (self, **kwds):
self.disconnect_on_after = kwds.get('disconnect_on_after', False)
pass
def setup_ble (self):
self.remote = None
self.ble = Adafruit_BluefruitLE.get_provider()
# Initialize the BLE system. MUST be called before other BLE calls!
self.ble.initialize()
# Get the first available BLE network adapter and make sure it's powered on.
self.adapter = self.ble.get_default_adapter()
self.adapter.power_on()
log.info('Using adapter: {0}'.format(self.adapter.name))
self.dexcom = None
pass
def setup_dexcom (self, serial=None, mac=None):
# Once connected do everything else in a try/finally to make sure the device
# is disconnected when done.
try:
# Wait for service discovery to complete for the UART service. Will
# time out after 60 seconds (specify timeout_sec parameter to override).
# print device._device.GattServices
log.info('Discovering services...')
UART.discover(self.remote)
# Once service discovery is complete create an instance of the service
# and start interacting with it.
self.uart = UART(self.remote, SERIAL=serial)
self.dexcom = Device(self.uart)
# log.info("DEXCOM", self.dexcom)
if not self.dexcom:
self.dexcom = Device(self.uart)
except:
# Make sure device is disconnected on exit.
if self.disconnect_on_after:
self.remote.disconnect()
def prolog (self, clear_cached_data=True, disconnect_devices=True, scan_devices=True, connect=True, mac=None):
"""
Things to do before running the main part of the application.
"""
# Clear any cached data because both bluez and CoreBluetooth have issues with
# caching data and it going stale.
if clear_cached_data:
self.ble.clear_cached_data()
if disconnect_devices:
# Disconnect any currently connected UART devices. Good for cleaning up and
# starting from a fresh state.
log.info('Disconnecting any connected UART devices...')
UART.disconnect_devices()
if scan_devices:
# Scan for UART devices.
log.info('Searching for UART device...')
try:
if mac:
self.remote = self.select_mac(mac=mac)
else:
self.adapter.start_scan()
# Search for the first UART device found (will time out after 60 seconds
# but you can specify an optional timeout_sec parameter to change it).
self.remote = UART.find_device()
if self.remote is None:
raise RuntimeError('Failed to find UART device!')
finally:
# Make sure scanning is stopped before exiting.
if self.adapter.is_scanning:
self.adapter.stop_scan()
if connect and not self.remote.is_connected:
log.info('Connecting to device...')
self.remote.connect() # Will time out after 60 seconds, specify timeout_sec parameter
# to change the timeout.
log.info(self.remote.name)
# device._device.Pair( )
# log.info(self.ble._print_tree( ))
for service in self.remote.list_services( ):
log.info("services: %s %s", service, service.uuid)
log.info("ADVERTISED")
log.info(self.remote.advertised)
pass
def select_mac (self, mac=None, **kwds):
for device in self.enumerate_dexcoms(**kwds):
if str(device.id) == mac:
return device
def enumerate_dexcoms (self, timeout_secs=10):
self.adapter.start_scan()
# Use atexit.register to call the adapter stop_scan function before quiting.
# This is good practice for calling cleanup code in this main function as
# a try/finally block might not be called since this is a background thread.
def maybe_stop ( ):
if self.adapter.is_scanning:
self.adapter.stop_scan( )
# atexit.register(maybe_stop)
log.info('Searching for UART devices...')
# print('Press Ctrl-C to quit (will take ~30 seconds on OSX).')
# Enter a loop and print out whenever a new UART device is found.
start = time.time( )
now = time.time( )
known_uarts = set()
while (now - start) < timeout_secs:
# Call UART.find_devices to get a list of any UART devices that
# have been found. This call will quickly return results and does
# not wait for devices to appear.
found = set(UART.find_devices())
# Check for new devices that haven't been seen yet and print out
# their name and ID (MAC address on Linux, GUID on OSX).
new = found - known_uarts
for device in new:
log.info('Found UART: {0} [{1}]'.format(device.name, device.id))
known_uarts.update(new)
# Sleep for a second and see if new devices have appeared.
time.sleep(1.0)
now = time.time( )
self.adapter.stop_scan( )
return known_uarts
def epilog (self):
"""
Things to do after running the main part of the application.
"""
# Make sure device is disconnected on exit.
if self.disconnect_on_after and self.remote.is_connected:
self.remote.disconnect()
# self.ble._gobject_mainloop.quit( )
pass
def set_handler (self, handler):
self.handler = handler
def run (self):
self.ble.run_mainloop_with(self.main)
pass
def main (self):
"""
Subclasses should replace this method.
"""
| mit | -5,777,387,970,777,378,000 | 36.848101 | 112 | 0.655518 | false |
imankulov/sentry | src/sentry/rules/base.py | 15 | 3204 | """
sentry.rules.base
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
Rules apply either before an event gets stored, or immediately after.
Basic actions:
- I want to get notified when [X]
- I want to group events when [X]
- I want to scrub data when [X]
Expanded:
- I want to get notified when an event is first seen
- I want to get notified when an event is marked as a regression
- I want to get notified when the rate of an event increases by [100%]
- I want to get notified when an event has been seen more than [100] times
- I want to get notified when an event matches [conditions]
- I want to group events when an event matches [conditions]
Rules get broken down into two phases:
- An action
- A rule condition
A condition itself may actually be any number of things, but that is determined
by the rule's logic. Each rule condition may be associated with a form.
- [ACTION:I want to get notified when] [RULE:an event is first seen]
- [ACTION:I want to group events when] [RULE:an event matches [FORM]]
"""
from __future__ import absolute_import
import logging
import re
from collections import namedtuple
from django.utils.html import escape
from django.utils.safestring import mark_safe
CallbackFuture = namedtuple('CallbackFuture', ['callback', 'kwargs'])
class RuleDescriptor(type):
def __new__(cls, *args, **kwargs):
new_cls = super(RuleDescriptor, cls).__new__(cls, *args, **kwargs)
new_cls.id = '%s.%s' % (new_cls.__module__, new_cls.__name__)
return new_cls
class RuleBase(object):
label = None
form_cls = None
logger = logging.getLogger('sentry.rules')
__metaclass__ = RuleDescriptor
def __init__(self, project, data=None, rule=None):
self.project = project
self.data = data or {}
self.had_data = data is not None
self.rule = rule
def get_option(self, key):
return self.data.get(key)
def get_form_instance(self):
if self.had_data:
data = self.data
else:
data = None
return self.form_cls(data)
def render_label(self):
return self.label.format(**self.data)
def render_form(self):
if not self.form_cls:
return self.label
form = self.get_form_instance()
def replace_field(match):
field = match.group(1)
return unicode(form[field])
return mark_safe(re.sub(r'{([^}]+)}', replace_field, escape(self.label)))
def validate_form(self):
if not self.form_cls:
return True
form = self.get_form_instance()
return form.is_valid()
def future(self, callback, **kwargs):
return CallbackFuture(
callback=callback,
kwargs=kwargs,
)
class EventState(object):
def __init__(self, is_new, is_regression, is_sample, rule_is_active,
rule_last_active):
self.is_new = is_new
self.is_regression = is_regression
        self.is_sample = is_sample
self.rule_is_active = rule_is_active
self.rule_last_active = rule_last_active
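# Illustrative sketch (not part of sentry): a minimal RuleBase subclass showing how
# `label` placeholders are filled from `data` by render_label(); the rule and its
# option name are invented for the example.
class ExampleCountRule(RuleBase):
    label = 'An event is seen more than {count} times'
# rule = ExampleCountRule(project, data={'count': 100})
# rule.render_label()  # -> 'An event is seen more than 100 times'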
| bsd-3-clause | -1,254,140,393,992,291,000 | 25.92437 | 81 | 0.640449 | false |
will-cromar/needy | ProofOfConcept/toyNetwork.py | 1 | 1982 | class Neuron:
'Represents a neuron'
currentVal = 0
threshold = 1
connections = []
identity = 0
def displayVal(self):
print(self.identity,":",self.currentVal)
for c in self.connections:
print(self.identity," connected to ",c.end.identity)
def addSynapse(self,destination):
print(self.identity," connecting to ",destination.identity)
connection = Synapse()
connection.start = self
connection.end = destination
self.connections.append(connection)
def fire(self):
if self.connections.__len__()==0 :
print(self.currentVal)
else:
for connection in self.connections:
print(self.identity," firing on ",connection.end.identity)
connection.end.currentVal+=connection.modifier*self.currentVal
self.currentVal = self.currentVal/2
def go(self):
if(self.currentVal>self.threshold):
self.fire()
self.currentVal-=.05
if(self.currentVal<0):
self.currentVal=0
class Synapse:
start = Neuron()
end = Neuron()
modifier = .75
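# The module-level code below builds a small feed-forward graph: one input neuron,
# (numLayers - 2) hidden layers of nodesPerLayer neurons each, and one output neuron,
# with every neuron connected forward to each neuron in the next layer.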
numLayers = 5
nodesPerLayer = 3
layers = []
first = Neuron()
firstlayer = [first]
layers.append(firstlayer)
for i in range(1, numLayers-1, 1):
hiddenLayer = []
for j in range(0, nodesPerLayer, 1):
new = Neuron()
new.identity = i*nodesPerLayer+j
hiddenLayer.append(new)
print(layers[len(layers)-1])
for k in layers[i - 1]:
k.addSynapse(new)
layers.append(hiddenLayer)
finaLayer = []
final = Neuron()
final.identity=numLayers*nodesPerLayer+1
finaLayer.append(final)
for k in layers[layers.__len__()-1]:
k.addSynapse(final)
layers.append(finaLayer)
all = []
for i in range(0,layers.__len__(),1):
for j in layers[i]:
all.append(j)
for i in range(0,10,1):
first.currentVal+= input("Thing:")
for n in all:
n.go()
n.displayVal() | mit | -801,217,288,847,883,000 | 25.44 | 78 | 0.608981 | false |
mantidproject/mantid | Framework/PythonInterface/test/python/plugins/functions/PrimStretchedExpFTTest.py | 3 | 1898 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from StretchedExpFTTestHelper import isregistered, do_fit
class PrimStretchedExpFTTest(unittest.TestCase):
def testRegistered(self):
self.assertTrue(*isregistered('PrimStretchedExpFT'))
def testGaussian(self):
""" Test PrimStretchedExpFT against the binned-integrated of
the Fourier transform of a gaussian
"""
# Target parameters
tg = {'tau': 20.0, # picoseconds
'beta': 2.0, # gaussian
              'height': 1.0 # We want identity, not just proportionality
}
# Initial guess reasonably far from target parameters
fString = "name=PrimStretchedExpFT,Height=2.0,Tau=30,Beta=1.5,Centre=0.0002;" +\
"name=FlatBackground,A0=0.0"
# Carry out the fit
self.assertTrue(*do_fit(tg, fString, 'GaussianIntegrated'))
def testLorentzian(self):
""" Test PrimStretchedExpFT against the binned-integrated of
the Fourier transform of a exponential
"""
# Target parameters
tg = {'tau': 100.0, # picoseconds
'beta': 1.0, # exponential
              'height': 1.0 # We want identity, not just proportionality
}
# Initial guess reasonably far from target parameters
fString = "name=PrimStretchedExpFT,Height=2.0,Tau=300,Beta=1.5,Centre=0.0002;" +\
"name=FlatBackground,A0=0.0"
# carry out the fit
self.assertTrue(*do_fit(tg, fString, 'LorentzianIntegrated'))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 2,611,006,478,260,727,000 | 39.382979 | 89 | 0.632244 | false |
Drooids/odoo | addons/mail/tests/test_mail_features.py | 172 | 59265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..mail_mail import mail_mail
from ..mail_thread import mail_thread
from .common import TestMail
from openerp.tools import mute_logger, email_split, html2plaintext
from openerp.tools.mail import html_sanitize
class test_mail(TestMail):
def test_000_alias_setup(self):
""" Test basic mail.alias setup works, before trying to use them for routing """
cr, uid = self.cr, self.uid
self.user_valentin_id = self.res_users.create(cr, uid,
{'name': 'Valentin Cognito', 'email': '[email protected]', 'login': 'valentin.cognito', 'alias_name': 'valentin.cognito'})
self.user_valentin = self.res_users.browse(cr, uid, self.user_valentin_id)
self.assertEquals(self.user_valentin.alias_name, self.user_valentin.login, "Login should be used as alias")
self.user_pagan_id = self.res_users.create(cr, uid,
{'name': 'Pagan Le Marchant', 'email': '[email protected]', 'login': '[email protected]', 'alias_name': '[email protected]'})
self.user_pagan = self.res_users.browse(cr, uid, self.user_pagan_id)
self.assertEquals(self.user_pagan.alias_name, 'plmarchant', "If login is an email, the alias should keep only the local part")
self.user_barty_id = self.res_users.create(cr, uid,
{'name': 'Bartholomew Ironside', 'email': '[email protected]', 'login': 'b4r+_#_R3wl$$', 'alias_name': 'b4r+_#_R3wl$$'})
self.user_barty = self.res_users.browse(cr, uid, self.user_barty_id)
self.assertEquals(self.user_barty.alias_name, 'b4r+_-_r3wl-', 'Disallowed chars should be replaced by hyphens')
def test_00_followers_function_field(self):
""" Tests designed for the many2many function field 'follower_ids'.
We will test to perform writes using the many2many commands 0, 3, 4,
5 and 6. """
cr, uid, user_admin, partner_bert_id, group_pigs = self.cr, self.uid, self.user_admin, self.partner_bert_id, self.group_pigs
# Data: create 'disturbing' values in mail.followers: same res_id, other res_model; same res_model, other res_id
group_dummy_id = self.mail_group.create(cr, uid,
{'name': 'Dummy group'}, {'mail_create_nolog': True})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.thread', 'res_id': self.group_pigs_id, 'partner_id': partner_bert_id})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.group', 'res_id': group_dummy_id, 'partner_id': partner_bert_id})
# Pigs just created: should be only Admin as follower
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Subscribe Bert through a '4' command
group_pigs.write({'message_follower_ids': [(4, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the only Pigs fans')
# Unsubscribe Bert through a '3' command
group_pigs.write({'message_follower_ids': [(3, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Set followers through a '6' command
group_pigs.write({'message_follower_ids': [(6, 0, [partner_bert_id])]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the only Pigs fan')
# Add a follower created on the fly through a '0' command
group_pigs.write({'message_follower_ids': [(0, 0, {'name': 'Patrick Fiori'})]})
partner_patrick_id = self.res_partner.search(cr, uid, [('name', '=', 'Patrick Fiori')])[0]
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, partner_patrick_id]), 'Bert and Patrick should be the only Pigs fans')
# Finally, unlink through a '5' command
group_pigs.write({'message_follower_ids': [(5, 0)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertFalse(follower_ids, 'Pigs group should not have fans anymore')
# Test dummy data has not been altered
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.thread'), ('res_id', '=', self.group_pigs_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the follower of dummy mail.thread data')
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', group_dummy_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the followers of dummy mail.group data')
def test_05_message_followers_and_subtypes(self):
""" Tests designed for the subscriber API as well as message subtypes """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
# Data: message subtypes
self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.group'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_other_def', 'default': True, 'res_model': 'crm.lead'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_def', 'default': True, 'res_model': False})
mt_mg_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.group'})
mt_all_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_nodef', 'default': False, 'res_model': False})
default_group_subtypes = self.mail_message_subtype.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', 'mail.group'), ('res_model', '=', False)])
# ----------------------------------------
# CASE1: test subscriptions with subtypes
# ----------------------------------------
# Do: subscribe Raoul, should have default subtypes
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set(default_group_subtypes),
'message_subscribe: Raoul subscription subtypes are incorrect, should be all default ones')
# Do: subscribe Raoul with specified new subtypes
group_pigs.message_subscribe_users([user_raoul.id], subtype_ids=[mt_mg_nodef])
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: 2 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
])
self.assertEqual(len(fol_ids), 2,
'message_subscribe: subscribing an already-existing follower should not create new entries in mail.followers')
# Test: Raoul follows only specified subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Subscribe Raoul without specified subtypes: should not erase existing subscription subtypes
group_pigs.message_subscribe_users([user_raoul.id, user_raoul.id])
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Unsubscribe Raoul twice through message_unsubscribe_users
group_pigs.message_unsubscribe_users([user_raoul.id, user_raoul.id])
group_pigs.refresh()
# Test: 1 follower (Admin)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(follower_ids, [user_admin.partner_id.id], 'Admin must be the only Pigs fan')
# Test: 1 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id)
])
self.assertEqual(len(fol_ids), 1,
'message_subscribe: group should have only 1 entry in mail.follower for 1 follower')
# Do: subscribe Admin with subtype_ids
group_pigs.message_subscribe_users([uid], [mt_mg_nodef, mt_all_nodef])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id), ('partner_id', '=', user_admin.partner_id.id)])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef, mt_all_nodef]), 'subscription subtypes are incorrect')
# ----------------------------------------
# CASE2: test mail_thread fields
# ----------------------------------------
subtype_data = group_pigs._get_subscription_data(None, None)[group_pigs.id]['message_subtype_data']
self.assertEqual(set(subtype_data.keys()), set(['Discussions', 'mt_mg_def', 'mt_all_def', 'mt_mg_nodef', 'mt_all_nodef']), 'mail.group available subtypes incorrect')
self.assertFalse(subtype_data['Discussions']['followed'], 'Admin should not follow Discussions in pigs')
self.assertTrue(subtype_data['mt_mg_nodef']['followed'], 'Admin should follow mt_mg_nodef in pigs')
self.assertTrue(subtype_data['mt_all_nodef']['followed'], 'Admin should follow mt_all_nodef in pigs')
def test_11_notification_url(self):
""" Tests designed to test the URL added in notification emails. """
cr, uid, group_pigs = self.cr, self.uid, self.group_pigs
# Test URL formatting
base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# Partner data
partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})
partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
# Mail data
mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
# Test: link for nobody -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail)
self.assertEqual(url, None,
'notification email: mails not send to a specific partner should not have any URL')
# Test: link for partner -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_bert)
self.assertEqual(url, None,
'notification email: mails send to a not-user partner should not have any URL')
# Test: link for user -> signin
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('model=mail.group', url,
'notification email: link should contain the model when having not notification email on a record')
self.assertIn('res_id=%s' % group_pigs.id, url,
'notification email: link should contain the res_id when having not notification email on a record')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'notification': True, 'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('message_id=%s' % mail.mail_message_id.id, url,
'notification email: link based on message should contain the mail_message id')
self.assertNotIn('model=mail.group', url,
'notification email: link based on message should not contain model')
self.assertNotIn('res_id=%s' % group_pigs.id, url,
'notification email: link based on message should not contain res_id')
@mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
def test_12_inbox_redirection(self):
""" Tests designed to test the inbox redirection of emails notification URLs. """
cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
# Data: post a message on pigs
msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id], type='comment', subtype='mail.mt_comment')
# No specific parameters -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
# Raoul has read access to Pigs -> should redirect to form view of Pigs
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
# Bert has no read access to Pigs -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
def test_20_message_post(self):
""" Tests designed for message_post. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a', 'notify_email': 'always'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'none'})
        # 3 - Dédé Grosbedon, with email and notify_email 'always': should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Attachments
attach1_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach1', 'datas_fname': 'Attach1',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach2_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach2', 'datas_fname': 'Attach2',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach3_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach3', 'datas_fname': 'Attach3',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
# 5 - Mail data
_subject = 'Pigs'
_mail_subject = 'Re: %s' % (group_pigs.name)
_body1 = '<p>Pigs rules</p>'
_body2 = '<html>Pigs rocks</html>'
_attachments = [
('List1', 'My first attachment'),
('List2', 'My second attachment')
]
# --------------------------------------------------
# CASE1: post comment + partners + attachments
# --------------------------------------------------
# Data: set alias_domain to see emails with alias
self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.domain', 'schlouby.fr')
# Data: change Pigs name to test reply_to
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': '"Pigs" !ù $%-'})
# Do: subscribe Raoul
new_follower_ids = [self.partner_raoul_id]
group_pigs.message_subscribe(new_follower_ids)
# Test: group followers = Raoul + uid
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_subscribe: incorrect followers after subscribe')
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg1_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body1, subject=_subject, partner_ids=[p_b_id, p_c_id],
attachment_ids=[attach1_id, attach2_id], attachments=_attachments,
type='comment', subtype='mt_comment')
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_message_id = msg.message_id
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject and body not modified
self.assertEqual(_subject, msg.subject, 'message_post: mail.message subject incorrect')
self.assertEqual(_body1, msg.body, 'message_post: mail.message body incorrect')
# Test: mail_message: notified_partner_ids = group followers + partner_ids - author
test_pids = set([self.partner_admin_id, p_b_id, p_c_id])
self.assertEqual(test_pids, set(msg_pids), 'message_post: mail.message notified partners incorrect')
# Test: mail_message: attachments (4, attachment_ids + attachments)
test_aids = set([attach1_id, attach2_id])
msg_attach_names = set([attach.name for attach in msg.attachment_ids])
test_attach_names = set(['Attach1', 'Attach2', 'List1', 'List2'])
self.assertEqual(len(msg_aids), 4,
'message_post: mail.message wrong number of attachments')
self.assertEqual(msg_attach_names, test_attach_names,
'message_post: mail.message attachments incorrectly added')
self.assertTrue(test_aids.issubset(set(msg_aids)),
'message_post: mail.message attachments duplicated')
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachments were not linked to the document')
self.assertEqual(attach.res_id, group_pigs.id,
'message_post: mail.message attachments were not linked to the document')
if 'List' in attach.name:
self.assertIn((attach.name, attach.datas.decode('base64')), _attachments,
'message_post: mail.message attachment name / data incorrect')
dl_attach = self.mail_message.download_attachment(cr, user_raoul.id, id_message=msg.id, attachment_id=attach.id)
self.assertIn((dl_attach['filename'], dl_attach['base64'].decode('base64')), _attachments,
'message_post: mail.message download_attachment is incorrect')
# Test: followers: same as before (author was already subscribed)
group_pigs.refresh()
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_post: wrong followers after posting')
# Test: mail_mail: notifications have been deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg1_id)]),
'message_post: mail.mail notifications should have been auto-deleted!')
# Test: notifications emails: to a and b, c is email only, r is author
test_emailto = ['Administrator <a@a>', 'Bert Tartopoils <b@b>']
# test_emailto = ['"Followers of -Pigs-" <a@a>', '"Followers of -Pigs-" <b@b>']
self.assertEqual(len(sent_emails), 2,
'message_post: notification emails wrong number of send emails')
self.assertEqual(set([m['email_to'][0] for m in sent_emails]), set(test_emailto),
'message_post: notification emails wrong recipients (email_to)')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <[email protected]>',
'message_post: notification email wrong email_from: should use alias of sender')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(sent_email['reply_to'], u'"YourCompany \\"Pigs\\" !ù $%-" <[email protected]>',
'message_post: notification email reply_to incorrect')
self.assertEqual(_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(_body1, sent_email['body'],
'message_post: notification email body incorrect')
self.assertIn('Pigs rules', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertFalse(sent_email['references'],
'message_post: references should be False when sending a message that is not a reply')
# Test: notification linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg1_id)])
notif_pids = set([notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)])
self.assertEqual(notif_pids, test_pids,
'message_post: mail.message created mail.notification incorrect')
# Data: Pigs name back to normal
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': 'Pigs'})
# --------------------------------------------------
# CASE2: reply + parent_id + parent notification
# --------------------------------------------------
# Data: remove alias_domain to see emails with alias
param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
self.registry('ir.config_parameter').unlink(cr, uid, param_ids)
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg2_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body2, type='email', subtype='mt_comment',
partner_ids=[p_d_id], parent_id=msg1_id, attachment_ids=[attach3_id],
context={'mail_post_autofollow': True})
msg = self.mail_message.browse(cr, uid, msg2_id)
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject is False, body, parent_id is msg_id
self.assertEqual(msg.subject, False, 'message_post: mail.message subject incorrect')
self.assertEqual(msg.body, html_sanitize(_body2), 'message_post: mail.message body incorrect')
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post: mail.message parent_id incorrect')
# Test: mail_message: notified_partner_ids = group followers
test_pids = [self.partner_admin_id, p_d_id]
self.assertEqual(set(test_pids), set(msg_pids), 'message_post: mail.message partners incorrect')
# Test: mail_message: notifications linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg2_id)])
notif_pids = [notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)]
self.assertEqual(set(test_pids), set(notif_pids), 'message_post: mail.message notification partners incorrect')
# Test: mail_mail: notifications deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg2_id)]), 'mail.mail notifications should have been auto-deleted!')
# Test: emails send by server (to a, b, c, d)
test_emailto = [u'Administrator <a@a>', u'Bert Tartopoils <b@b>', u'Carine Poilvache <c@c>', u'D\xe9d\xe9 Grosbedon <d@d>']
# test_emailto = [u'"Followers of Pigs" <a@a>', u'"Followers of Pigs" <b@b>', u'"Followers of Pigs" <c@c>', u'"Followers of Pigs" <d@d>']
# self.assertEqual(len(sent_emails), 3, 'sent_email number of sent emails incorrect')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <r@r>',
'message_post: notification email wrong email_from: should use email of sender when no alias domain set')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(email_split(sent_email['reply_to']), ['r@r'], # was '"Followers of Pigs" <r@r>', but makes no sense
'message_post: notification email reply_to incorrect: should have raoul email')
self.assertEqual(_mail_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(html_sanitize(_body2), sent_email['body'],
'message_post: notification email does not contain the body')
self.assertIn('Pigs rocks', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertIn(msg_message_id, sent_email['references'],
'message_post: notification email references lacks parent message message_id')
# Test: attachments + download
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachment res_model incorrect')
self.assertEqual(attach.res_id, self.group_pigs_id,
'message_post: mail.message attachment res_id incorrect')
# Test: Dédé has been notified -> should also have been notified of the parent message
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_pids = set([partner.id for partner in msg.notified_partner_ids])
test_pids = set([self.partner_admin_id, p_b_id, p_c_id, p_d_id])
self.assertEqual(test_pids, msg_pids, 'message_post: mail.message parent notification not created')
# Do: reply to last message
msg3_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id, body='Test', parent_id=msg2_id)
msg = self.mail_message.browse(cr, uid, msg3_id)
# Test: check that its parent will be the first message
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post did not flatten the thread structure')
def test_25_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
mail_compose = self.registry('mail.compose.message')
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'always'})
        # 3 - Dédé Grosbedon, with email and notify_email 'always': should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Create a Bird mail.group, that will be used to test mass mailing
group_bird_id = self.mail_group.create(cr, uid,
{
'name': 'Bird',
'description': 'Bird resistance',
}, context={'mail_create_nolog': True})
group_bird = self.mail_group.browse(cr, uid, group_bird_id)
# 5 - Mail data
_subject = 'Pigs'
_body = 'Pigs <b>rule</b>'
_reply_subject = 'Re: %s' % _subject
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': 'My first attachment'.encode('base64')},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': 'My second attachment'.encode('base64')}
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# 6 - Subscribe Bert to Pigs
group_pigs.message_subscribe([p_b_id])
# --------------------------------------------------
# CASE1: wizard + partners + context keys
# --------------------------------------------------
# Do: Raoul wizard-composes on Pigs with auto-follow for partners, not for author
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': _body,
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: composition_mode, model, res_id
self.assertEqual(compose.composition_mode, 'comment', 'compose wizard: mail.compose.message incorrect composition_mode')
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
# Do: Post the comment
mail_compose.send_mail(cr, user_raoul.id, [compose_id], {'mail_post_autofollow': True, 'mail_create_nosubscribe': True})
group_pigs.refresh()
message = group_pigs.message_ids[0]
# Test: mail.group: followers (c and d added by auto follow key; raoul not added by nosubscribe key)
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Test: mail.message: subject, body inside p
self.assertEqual(message.subject, _subject, 'compose wizard: mail.message incorrect subject')
self.assertEqual(message.body, '<p>%s</p>' % _body, 'compose wizard: mail.message incorrect body')
# Test: mail.message: notified_partner_ids = admin + bert (followers) + c + d (recipients)
msg_pids = [partner.id for partner in message.notified_partner_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(msg_pids), set(test_pids),
'compose wizard: mail.message notified_partner_ids incorrect')
# --------------------------------------------------
# CASE2: reply + attachments
# --------------------------------------------------
# Do: Reply with attachments
compose_id = mail_compose.create(cr, user_raoul.id,
{
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])]
}, context={
'default_composition_mode': 'comment',
'default_res_id': self.group_pigs_id,
'default_parent_id': message.id
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: model, res_id, parent_id
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
self.assertEqual(compose.parent_id.id, message.id, 'compose wizard: mail.compose.message incorrect parent_id')
# Test: mail.compose.message: subject as Re:.., body, parent_id
self.assertEqual(compose.subject, _reply_subject, 'compose wizard: mail.compose.message incorrect subject')
self.assertFalse(compose.body, 'compose wizard: mail.compose.message body should not contain parent message body')
self.assertEqual(compose.parent_id and compose.parent_id.id, message.id, 'compose wizard: mail.compose.message parent_id incorrect')
# Test: mail.compose.message: attachments
for attach in compose.attachment_ids:
self.assertIn((attach.datas_fname, attach.datas.decode('base64')), _attachments_test,
'compose wizard: mail.message attachment name / data incorrect')
# --------------------------------------------------
# CASE3: mass_mail on Pigs and Bird
# --------------------------------------------------
# Do: Compose in mass_mail_mode on pigs and bird
compose_id = mail_compose.create(
cr, user_raoul.id, {
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id, group_bird_id],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
# check mail_mail
mail_mail_ids = self.mail_mail.search(cr, uid, [('subject', '=', _subject)])
for mail_mail in self.mail_mail.browse(cr, uid, mail_mail_ids):
self.assertEqual(set([p.id for p in mail_mail.recipient_ids]), set([p_c_id, p_d_id]),
'compose wizard: mail_mail mass mailing: mail.mail in mass mail incorrect recipients')
# check logged messages
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
# Test: mail.message: subject, body, subtype, notified partners (nobody + specific recipients)
self.assertEqual(message1.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message1.body, '<p>%s</p>' % group_pigs.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message1.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
self.assertEqual(message2.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message2.body, '<p>%s</p>' % group_bird.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message2.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
# Test: mail.group followers: author not added as follower in mass mail mode
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
bird_pids = [p.id for p in group_bird.message_follower_ids]
test_pids = [self.partner_admin_id]
self.assertEqual(set(bird_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Do: Compose in mass_mail, coming from list_view, we have an active_domain that should be supported
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id],
'active_domain': [('name', 'in', ['Pigs', 'Bird'])],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(
cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
def test_30_needaction(self):
""" Tests for mail.message needaction. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
na_admin_base = self.mail_message._needaction_count(cr, uid, domain=[])
na_demo_base = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
# Test: number of unread notification = needaction on mail.message
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
na_count = self.mail_message._needaction_count(cr, uid, domain=[])
self.assertEqual(len(notif_ids), na_count, 'unread notifications count does not match needaction count')
# Do: post 2 message on group_pigs as admin, 3 messages as demo user
for dummy in range(2):
group_pigs.message_post(body='My Body', subtype='mt_comment')
raoul_pigs = group_pigs.sudo(user_raoul)
for dummy in range(3):
raoul_pigs.message_post(body='My Demo Body', subtype='mt_comment')
# Test: admin has 3 new notifications (from demo), and 3 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_admin_base + 3, 'Admin should have 3 new unread notifications')
na_admin = self.mail_message._needaction_count(cr, uid, domain=[])
na_admin_group = self.mail_message._needaction_count(cr, uid, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_admin, na_admin_base + 3, 'Admin should have 3 new needaction')
self.assertEqual(na_admin_group, 3, 'Admin should have 3 needaction related to Pigs')
# Test: demo has 0 new notifications (not a follower, not receiving its own messages), and 0 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_raoul.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_demo_base + 0, 'Demo should have 0 new unread notifications')
na_demo = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
na_demo_group = self.mail_message._needaction_count(cr, user_raoul.id, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_demo, na_demo_base + 0, 'Demo should have 0 new needaction')
self.assertEqual(na_demo_group, 0, 'Demo should have 0 needaction related to Pigs')
def test_40_track_field(self):
""" Testing auto tracking of fields. """
def _strip_string_spaces(body):
return body.replace(' ', '').replace('\n', '')
        # Data: subscribe Raoul to Pigs, because he will change the public attribute and may lose access to the record
cr, uid = self.cr, self.uid
self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_raoul_id])
# Data: res.users.group, to test group_public_id automatic logging
group_system_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_system')
group_system_id = group_system_ref and group_system_ref[1] or False
# Data: custom subtypes
mt_private_id = self.mail_message_subtype.create(cr, uid, {'name': 'private', 'description': 'Private public'})
self.ir_model_data.create(cr, uid, {'name': 'mt_private', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_private_id})
mt_name_supername_id = self.mail_message_subtype.create(cr, uid, {'name': 'name_supername', 'description': 'Supername name'})
self.ir_model_data.create(cr, uid, {'name': 'mt_name_supername', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_name_supername_id})
mt_group_public_set_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public_set', 'description': 'Group set'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public_set', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_set_id})
mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
# Data: alter mail_group model for testing purposes (test on classic, selection and many2one fields)
cls = type(self.mail_group)
self.assertNotIn('_track', cls.__dict__)
cls._track = {
'public': {
'mail.mt_private': lambda self, cr, uid, obj, ctx=None: obj.public == 'private',
},
'name': {
'mail.mt_name_supername': lambda self, cr, uid, obj, ctx=None: obj.name == 'supername',
},
'group_public_id': {
'mail.mt_group_public_set': lambda self, cr, uid, obj, ctx=None: obj.group_public_id,
'mail.mt_group_public': lambda self, cr, uid, obj, ctx=None: True,
},
}
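        # Added note: _track maps a field name to {subtype xml id: condition}; when a
        # tracked field changes and the condition evaluates to True for the record, a
        # tracking message with that subtype is posted on the document.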
visibility = {'public': 'onchange', 'name': 'always', 'group_public_id': 'onchange'}
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
delattr(cls, '_track')
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change name -> always tracked, not related to a subtype
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 1, 'tracked: a message should have been produced')
# Test: first produced message: no subtype, name change tracked
last_msg = self.group_pigs.message_ids[-1]
self.assertFalse(last_msg.subtype_id, 'tracked: message should not have been linked to a subtype')
self.assertIn(u'SelectedGroupOnly\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn('Pigs', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change name as supername, public as private -> 2 subtypes
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'name': 'supername', 'public': 'private'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 3, 'tracked: two messages should have been produced')
# Test: first produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-2]
self.assertEqual(last_msg.subtype_id.id, mt_private_id, 'tracked: message should be linked to mt_private subtype')
self.assertIn('Private public', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
# Test: second produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-3]
self.assertEqual(last_msg.subtype_id.id, mt_name_supername_id, 'tracked: message should be linked to mt_name_supername subtype')
self.assertIn('Supername name', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Public\u2192Private', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked feature: message body does not hold always tracked field')
# Test: change public as public, group_public_id -> 2 subtypes, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public', 'group_public_id': group_system_id})
self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 5, 'tracked: two messages should have been produced')
# Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-4]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_set_id, 'tracked: message should be linked to mt_group_public_set_id')
self.assertIn('Group set', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: second produced message: mt_group_public_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-5]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change group_public_id to False -> 1 subtype, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'group_public_id': False})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: one message should have been produced')
# Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-6]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Administration/Settings\u2192', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change not tracked field, no tracking message
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'description': 'Dummy'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: No message should have been produced')
| agpl-3.0 | -5,033,150,761,180,360,000 | 64.041712 | 175 | 0.61261 | false |
ofanoyi/sumatrapdf | scripts/gen_settings_html.py | 1 | 9699 | #!/usr/bin/env python
import os, util2, gen_settingsstructs, trans_langs
"""
TODO:
* for gen_langs_html, show languages that don't have enough translations
in a separate table
"""
g_version = util2.get_sumatrapdf_version()
html_tmpl = """\
<!doctype html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Customizing SumatraPDF</title>
<style type=text/css>
body {
font-size: 90%;
background-color: #f5f5f5;
}
.desc {
padding: 0px 10px 0px 10px;
}
.txt1 {
/* bold doesn't look good in the fonts above */
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
font-size: 88%;
color: #800; /* this is brown */
}
.txt2 {
font-family: Verdana, Arial, sans-serif;
font-family: serif;
font-size: 90%;
font-weight: bold;
color: #800; /* this is brown */
}
.txt {
font-family: serif;
font-size: 95%;
font-weight: bold;
color: #800; /* this is brown */
color: #000;
background-color: #ececec;
border: 1px solid #fff;
border-radius: 10px;
-webkit-border-radius: 10px;
box-shadow: rgba(0, 0, 0, .15) 3px 3px 4px;
-webkit-box-shadow: rgba(0, 0, 0, .15) 3px 3px 4px;
padding: 10px 10px 10px 20px;
}
.cm {
color: #800; /* this is brown, a bit aggressive */
color: #8c8c8c; /* this is gray */
color: #555; /* this is darker gray */
font-weight: normal;
}
</style>
</head>
<body>
<div class=desc>
<h2>Customizing SumatraPDF</h2>
<p>You can change the look and behavior of
<a href="http://blog.kowalczyk.info/software/sumatrapdf/">SumatraPDF</a>
by editing the file <code>SumatraPDF-settings.txt</code>. The file is stored in
<code>%APPDATA%\SumatraPDF</code> directory for the installed version or in the
same directory as <code>SumatraPDF.exe</code> executable for the portable version.</p>
<p>Use the menu item <code>Settings -> Advanced Settings...</code> to open the settings file
with your default text editor.</p>
<p>The file is in a simple text format. Below is an explanation of
what the different settings mean and what their default values are.</p>
<p>Highlighted settings can't be changed from the UI. Modifying other settings
directly in this file is not recommended.</p>
<p>If you add or remove lines with square brackets, <b>make sure to always add/remove
square brackets in pairs</b>! Else you risk losing all the data following them.</p>
</div>
<pre class=txt>
%INSIDE%
</pre>
<div class=desc>
<h3 id="color">Syntax for color values</h3>
<p>
The syntax for colors is: <code>#rrggbb</code>.</p>
<p>The components are hex values (ranging from 00 to FF) and stand for:
<ul>
<li><code>rr</code> : red component</li>
<li><code>gg</code> : green component</li>
<li><code>bb</code> : blue component</li>
</ul>
For example #ff0000 means red color. You can use <a href="http://www.colorpicker.com/">
Color Picker</a> or <a href="http://mudcu.be/sphere/">Sphere</a> or
<a href="http://colorschemedesigner.com/">ColorScheme Designer</a> to pick a color.
</p>
</div>
</body>
</html>
"""
langs_html_tmpl = """\
<!doctype html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Languages supported by SumatraPDF</title>
<style type=text/css>
body {
font-size: 90%;
background-color: #f5f5f5;
}
.txt1 {
/* bold doesn't look good in the fonts above */
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
font-size: 88%;
color: #800; /* this is brown */
}
.txt2 {
font-family: Verdana, Arial, sans-serif;
font-family: serif;
font-size: 90%;
font-weight: bold;
color: #800; /* this is brown */
}
.txt {
font-family: serif;
font-size: 95%;
font-weight: bold;
color: #800; /* this is brown */
color: #000;
background-color: #ececec;
}
.cm {
color: #800; /* this is brown, a bit aggressive */
color: #8c8c8c; /* this is gray */
color: #555; /* this is darker gray */
font-weight: normal;
}
</style>
</head>
<body>
<h2>Languages supported by SumatraPDF</h2>
<p>Below are the languages supported by SumatraPDF. You can use the ISO code as the
value of the <code>UiLanguage</code> setting in the <a href="settings.html">settings file</a>.
</p>
<p>Note: not all languages are fully translated. Help us <a href="http://www.apptranslator.org/app/SumatraPDF">translate SumatraPDF</a>.</p>
<table>
<tr><th>Language name</th><th>ISO code</th></tr>
%INSIDE%
</table>
</body>
</html>
"""
#indent_str = " "
indent_str = " "
# if s in the form: "foo](bar.html)", returns ["foo", "bar.html"].
# otherwise returns ["foo"]
def extract_url(s):
if not s.endswith(")"):
return [s]
word_end = s.find("]")
assert word_end != -1
word = s[:word_end]
assert s[word_end+1] == "("
url = s[word_end+2:-1]
return [word, url]
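# Illustrative examples (added, derived from the code above):
#   extract_url('foo](bar.html)') -> ['foo', 'bar.html']
#   extract_url('foo')            -> ['foo']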
def gen_comment(comment, start, first=False):
line_len = 100
s = start + '<span class=cm>'
if not first:
s = "\n" + s
left = line_len - len(start)
# [foo](bar.html) is turned into <a href="bar.html">foo</a>
href_text = None
for word in comment.split():
if word[0] == "[":
word_url = extract_url(word[1:])
if len(word_url) == 2:
s += '<a href="%s">%s</a>' % (word_url[1], word_url[0])
continue
href_text = word_url[0]
continue
elif href_text != None:
word_url = extract_url(word)
href_text = href_text + " " + word_url[0]
if len(word_url) == 2:
s += '<a href="%s">%s</a> ' % (word_url[1], href_text)
href_text = None
continue
if left < len(word):
s += "\n" + start
left = line_len - len(start)
word += " "
left -= len(word)
if word == "color ":
word = '<a href="#color">color</a> '
elif word == "colors ":
word = '<a href="#color">colors</a> '
s += word
s = s.rstrip()
s += '</span>'
return [s]
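# Illustrative example (added): gen_comment('see [docs](x.html)', indent_str, first=True)
# returns a one-element list whose string wraps the text in <span class=cm> and rewrites
# the markdown-style link as <a href="x.html">docs</a>.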
def gen_struct(struct, indent="", prerelease=False):
lines = []
first = True
inside_expert = False
for field in struct.default:
if field.internal or type(field) is gen_settingsstructs.Comment or not prerelease and field.prerelease:
continue
start_idx = len(lines)
comment = field.docComment
if field.version != "2.3":
comment += " (introduced in version %s)" % field.version
lines += gen_comment(comment, indent, first)
if type(field) is gen_settingsstructs.Array and not field.type.name == "Compact":
indent2 = indent + indent_str[:len(indent_str)/2]
start = "%s%s [\n%s[" % (indent, field.name, indent2)
end = "%s]\n%s]" % (indent2, indent)
inside = gen_struct(field, indent + indent_str, prerelease)
lines += [start, inside, end]
elif type(field) is gen_settingsstructs.Struct and not field.type.name == "Compact":
start = "%s%s [" % (indent, field.name)
end = "%s]" % indent
inside = gen_struct(field, indent + indent_str, prerelease)
lines += [start, inside, end]
else:
s = field.inidefault(commentChar="").lstrip()
lines += [indent + s]
first = False
if field.expert and not inside_expert:
lines[start_idx] = '<div>' + lines[start_idx]
elif not field.expert and inside_expert:
lines[start_idx] = '</div>' + lines[start_idx]
inside_expert = field.expert
return "\n".join(lines)
class Lang(object):
def __init__(self, name, code):
self.name = name
self.code = code
def blog_dir():
script_dir = os.path.realpath(os.path.dirname(__file__))
blog_dir = os.path.realpath(os.path.join(script_dir, "..", "..", "web", "blog", "www", "software", "sumatrapdf"))
if os.path.exists(blog_dir): return blog_dir
return None
def gen_langs_html():
langs = trans_langs.g_langs
langs = [Lang(el[1], el[0]) for el in langs]
lines = []
langs = sorted(langs, key=lambda lang: lang.name)
for l in langs:
s = '<tr><td>%s</td><td>%s</td></tr>' % (l.name, l.code)
lines += [s]
inside = "\n".join(lines)
s = langs_html_tmpl.replace("%INSIDE%", inside)
file_name = "langs.html"
p = os.path.join("scripts", file_name)
open(p, "w").write(s)
if blog_dir():
p = os.path.join(blog_dir(), file_name)
open(p, "w").write(s)
def gen_html():
prefs = gen_settingsstructs.GlobalPrefs
inside = gen_struct(prefs)
s = html_tmpl.replace("%INSIDE%", inside)
file_name = "settings" + g_version + ".html"
p = os.path.join("scripts", file_name)
open(p, "w").write(s)
if blog_dir():
p = os.path.join(blog_dir(), file_name)
open(p, "w").write(s)
        # also save the latest version as settings.html so that there is a
        # permanent version we can link to from docs that is independent of
        # the program version number
p = os.path.join(blog_dir(), "settings.html")
open(p, "w").write(s)
if __name__ == "__main__":
util2.chdir_top()
gen_langs_html()
gen_html()
gen_settingsstructs.main()
| gpl-3.0 | -9,178,916,788,926,531,000 | 28.214953 | 140 | 0.576864 | false |
CodeScaleInc/log4django | log4django/templatetags/log4django.py | 1 | 1853 | import json
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.utils.text import normalize_newlines
from ..models import LogRecord
from ..settings import EXTRA_DATA_INDENT, PAGINATOR_RANGE
register = template.Library()
@register.simple_tag
def level_css_class(level):
return {
LogRecord.LEVEL.NOTSET: 'label-default',
LogRecord.LEVEL.DEBUG: 'label-success',
LogRecord.LEVEL.INFO: 'label-info',
LogRecord.LEVEL.WARNING: 'label-warning',
LogRecord.LEVEL.ERROR: 'label-primary',
LogRecord.LEVEL.CRITICAL: 'label-danger'
}[int(level)]
@register.simple_tag
def extra_data(record):
return json.dumps(record.extra, indent=EXTRA_DATA_INDENT)
@register.inclusion_tag('log4django/bootstrap/templatetags/pagination.html', takes_context=True)
def pagination(context, page):
if PAGINATOR_RANGE > page.paginator.num_pages:
range_length = page.paginator.num_pages
else:
range_length = PAGINATOR_RANGE
range_length -= 1
range_min = max(page.number - (range_length / 2), 1)
range_max = min(page.number + (range_length / 2), page.paginator.num_pages)
range_diff = range_max - range_min
if range_diff < range_length:
shift = range_length - range_diff
if range_min - shift > 0:
range_min -= shift
else:
range_max += shift
page_range = range(range_min, range_max + 1)
getvars = context['request'].GET.copy()
getvars.pop('page', None)
return dict(
page=page, page_range=page_range, getvars=getvars
)
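# Worked example for the windowing logic above (illustrative numbers, not
# taken from any project settings): with PAGINATOR_RANGE = 5 and a paginator
# of 20 pages, page 10 gives range_min = 8, range_max = 12 and page_range
# [8, 9, 10, 11, 12]; for page 1 the window cannot extend left, so the shift
# branch raises range_max instead and page_range becomes [1, 2, 3, 4, 5].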
@register.filter
@stringfilter
def remove_newlines(text):
normalized_text = normalize_newlines(text)
return mark_safe(normalized_text.replace('\n', ' '))
remove_newlines.is_safe = True
| bsd-3-clause | 4,271,474,956,007,900,000 | 29.883333 | 96 | 0.685915 | false |
youdonghai/intellij-community | python/lib/Lib/site-packages/django/test/testcases.py | 71 | 24716 | import re
import sys
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import clear_url_caches
from django.db import transaction, connection, connections, DEFAULT_DB_ALIAS
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import simplejson, unittest as ut2
from django.utils.encoding import smart_str
from django.utils.functional import wraps
__all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase',
'skipIfDBFeature', 'skipUnlessDBFeature')
try:
all
except NameError:
from django.utils.itercompat import all
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)", lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_managed = transaction.managed
def nop(*args, **kwargs):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
transaction.managed = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
transaction.managed = real_managed
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
"The entry method for doctest output checking. Defers to a sequence of child checkers"
checks = (self.check_output_default,
self.check_output_numeric,
self.check_output_xml,
self.check_output_json)
for check in checks:
if check(want, got, optionflags):
return True
return False
def check_output_default(self, want, got, optionflags):
"The default comparator provided by doctest - not perfect, but good for most purposes"
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
"""Doctest does an exact string comparison of output, which means that
some numerically equivalent values aren't equal. This check normalizes
* long integers (22L) so that they equal normal integers. (22)
* Decimals so that they are comparable, regardless of the change
made to __repr__ in Python 2.6.
"""
return doctest.OutputChecker.check_output(self,
normalize_decimals(normalize_long_ints(want)),
normalize_decimals(normalize_long_ints(got)),
optionflags)
def check_output_xml(self, want, got, optionsflags):
"""Tries to do a 'xml-comparision' of want and got. Plain string
comparision doesn't always work because, for example, attribute
ordering should not be important.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
want, got = self._strip_quotes(want, got)
want = want.replace('\\n','\n')
got = got.replace('\\n','\n')
# If the string is not a complete xml document, we may need to add a
        # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
try:
want_root = parseString(want).firstChild
got_root = parseString(got).firstChild
except:
return False
return check_element(want_root, got_root)
def check_output_json(self, want, got, optionsflags):
"Tries to compare want and got as if they were JSON-encoded data"
want, got = self._strip_quotes(want, got)
try:
want_json = simplejson.loads(want)
got_json = simplejson.loads(got)
except:
return False
return want_json == got_json
def _strip_quotes(self, want, got):
"""
Strip quotes of doctests output values:
>>> o = OutputChecker()
>>> o._strip_quotes("'foo'")
"foo"
>>> o._strip_quotes('"foo"')
"foo"
>>> o._strip_quotes("u'foo'")
"foo"
>>> o._strip_quotes('u"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
class DocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
def report_unexpected_exception(self, out, test, example, exc_info):
doctest.DocTestRunner.report_unexpected_exception(self, out, test,
example, exc_info)
# Rollback, in case of database errors. Otherwise they'd have
# side effects on other tests.
for conn in connections:
transaction.rollback_unless_managed(using=conn)
class _AssertNumQueriesContext(object):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
self.connection = connection
def __enter__(self):
self.old_debug_cursor = self.connection.use_debug_cursor
self.connection.use_debug_cursor = True
self.starting_queries = len(self.connection.queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.use_debug_cursor = self.old_debug_cursor
if exc_type is not None:
return
final_queries = len(self.connection.queries)
executed = final_queries - self.starting_queries
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected" % (
executed, self.num
)
)
class TransactionTestCase(ut2.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Flushing the database.
* If the Test Case class has a 'fixtures' member, installing the
named fixtures.
* If the Test Case class has a 'urls' member, replace the
ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self._fixture_setup()
self._urlconf_setup()
mail.outbox = []
def _fixture_setup(self):
# If the test case has a multi_db=True flag, flush all databases.
# Otherwise, just flush default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
for db in databases:
call_command('flush', verbosity=0, interactive=False, database=db)
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db})
def _urlconf_setup(self):
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self.client = self.client_class()
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(TransactionTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _post_teardown(self):
""" Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
* Force closing the connection, so that the next test gets
a clean cursor.
"""
self._fixture_teardown()
self._urlconf_teardown()
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does rollback, the effect
        # of these statements is lost, which can affect the operation
# of tests (e.g., losing a timezone setting causing objects to
# be created with the wrong time).
# To make sure this doesn't happen, get a clean connection at the
# start of every test.
for connection in connections.all():
connection.close()
def _fixture_teardown(self):
pass
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def save_warnings_state(self):
"""
Saves the state of the warnings module
"""
self._warnings_state = get_warnings_state()
def restore_warnings_state(self):
"""
        Restores the state of the warnings module to the state
saved by save_warnings_state()
"""
restore_warnings_state(self._warnings_state)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
redirect_response = response.client.get(path, QueryDict(query))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
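    # Illustrative usage (the URLs and form data below are hypothetical):
    #
    #     response = self.client.post('/accounts/login/', {'username': 'u'})
    #     self.assertRedirects(response, '/accounts/profile/')
    #
    # The target URL is fetched with the same test client, so it must resolve
    # and return target_status_code (200 by default).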
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix=''):
"""
Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
real_count = response.content.count(text)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, text, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % text)
def assertNotContains(self, response, text, status_code=200,
msg_prefix=''):
"""
Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
        ``text`` doesn't occur in the content of the response.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
self.assertEqual(response.content.count(text), 0,
msg_prefix + "Response should not contain '%s'" % text)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i,context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertTemplateUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, u', '.join(template_names)))
def assertTemplateNotUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertQuerysetEqual(self, qs, values, transform=repr):
return self.assertEqual(map(transform, qs), values)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
connection = connections[using]
context = _AssertNumQueriesContext(self, num, connection)
if func is None:
return context
# Basically emulate the `with` statement here.
context.__enter__()
try:
func(*args, **kwargs)
except:
context.__exit__(*sys.exc_info())
raise
else:
context.__exit__(*sys.exc_info())
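    # Illustrative usage (the Article model is hypothetical); both forms drive
    # the _AssertNumQueriesContext defined above:
    #
    #     with self.assertNumQueries(2):
    #         Article.objects.create(title="a")
    #         Article.objects.create(title="b")
    #
    #     self.assertNumQueries(2, lambda: (Article.objects.create(title="a"),
    #                                       Article.objects.create(title="b")))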
def connections_support_transactions():
"""
    Returns True if all connections support transactions. This is messy
    because Python 2.4 doesn't provide the builtin any() or all() functions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase, but surrounds every test
with a transaction, monkey-patches the real transaction management routines to
    do nothing, and rolls back the test transaction at the end of the test. You have
    to use TransactionTestCase if you need transaction management inside a test.
"""
def _fixture_setup(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_setup()
# If the test case has a multi_db=True flag, setup all databases.
# Otherwise, just use default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
for db in databases:
transaction.enter_transaction_management(using=db)
transaction.managed(True, using=db)
disable_transaction_methods()
from django.contrib.sites.models import Site
Site.objects.clear_cache()
for db in databases:
if hasattr(self, 'fixtures'):
call_command('loaddata', *self.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db
})
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
# If the test case has a multi_db=True flag, teardown all databases.
# Otherwise, just teardown default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
restore_transaction_methods()
for db in databases:
transaction.rollback(using=db)
transaction.leave_transaction_management(using=db)
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and issubclass(test_func, TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise ut2.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
test_item = test_func
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(feature):
"Skip a test if a database has the named feature"
return _deferredSkip(lambda: getattr(connection.features, feature),
"Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
"Skip a test unless a database has the named feature"
return _deferredSkip(lambda: not getattr(connection.features, feature),
"Database doesn't support feature %s" % feature)
| apache-2.0 | 5,807,502,767,625,504,000 | 38.294118 | 116 | 0.579665 | false |
rnehra01/tanner | tanner/reporting/log_mongodb.py | 1 | 1148 | import json
try:
import pymongo
MONGO = True
except ImportError:
MONGO = False
from bson.objectid import ObjectId
from gridfs import GridFS
from tanner import config
class Reporting():
def __init__(self):
if MONGO:
# Create the connection
mongo_uri = config.TannerConfig.get('MONGO', 'URI')
connection = pymongo.MongoClient(mongo_uri)
# Connect to Databases.
tandb = connection['tanner']
tandbfs = connection['voldbfs']
# Get Collections
self.tan_sessions = tandb.sessions
self.tan_files = GridFS(tandbfs)
# Indexes
self.tan_sessions.create_index([('$**', 'text')])
else:
print('pymongo not found. pip install pymongo')
def update_session(self, session_id, new_values):
session_id = ObjectId(session_id)
self.tan_sessions.update_one({'_id': session_id}, {"$set": new_values})
return True
def create_session(self, session_data):
session_id = self.tan_sessions.insert_one(session_data).inserted_id
return session_id | gpl-3.0 | 2,444,609,451,415,730,700 | 25.72093 | 79 | 0.600174 | false |
dewtx29/python_ann | project/cg/opengl/cos4102/rectMoveTheta.py | 1 | 2744 | from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
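# Summary (comment added for clarity): a small GLUT demo that draws a red
# wireframe rectangle bounded by xMin/xMax/yMin/yMax inside a
# gluOrtho2D(-20, 20, -20, 20) view, plus a white line segment rotated about
# the origin by theta. The arrow keys move the rectangle, 'a'/'b' step theta
# by +/-10 (reversing direction at the 0/360 limits), and the idle callback
# keeps increasing theta by 0.1 per frame.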
window = 0
width, height = 500, 400
theta = [0.0]
sign = [1.0]
xMin = [-10.0]
xMax = [10.0]
yMin = [-10.0]
yMax= [10.0]
def initialization():
glClearColor(0.0,0.0,0.0,0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(-20, 20, -20, 20);
def draw():
global xMin
global xMax
global yMin
global yMax
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
glPushMatrix()
glBegin(GL_POLYGON)
glVertex2f(xMin[0],yMin[0])
glVertex2f(xMin[0],yMax[0])
glVertex2f(xMax[0],yMax[0])
glVertex2f(xMax[0],yMin[0])
glEnd()
glPopMatrix()
glColor3f(1.0, 1.0,1.0);
glPushMatrix()
glRotate(theta[0],0,0,1)
glBegin(GL_LINES)
glVertex2f(-5,-5)
glVertex2f(5,5)
glEnd()
glPopMatrix()
glFlush()
#glutSwapBuffers()
#MyIdle()
#glutPostRedisplay()
def MyIdle():
global theta
theta[0] = theta[0] + 0.1
glutPostRedisplay()
def keyBoardEvent(key,x,y):
global theta
global sign
if (key == "a"):
if(theta[0] > 360 or theta[0] < 0):
sign[0] = sign[0] * -1
theta[0] = theta[0] + (sign[0]*10)
if (key == "b"):
if(theta[0] > 360 or theta[0] < 0):
sign[0] = sign[0] * -1
theta[0] = theta[0] + (sign[0]*(-10))
if (key == "s"):
xMin[0] = xMin[0] + 1
glutPostRedisplay()
def specialKeyEvent(key,x,y):
#print key
global theta
global sign
if (key == GLUT_KEY_LEFT):
if (xMin[0] > -20):
xMin[0] = xMin[0] - 1.0
xMax[0] = xMax[0] - 1.0
if (key == GLUT_KEY_RIGHT):
if (xMax[0] < 20):
xMin[0] = xMin[0] + 1.0
xMax[0] = xMax[0] + 1.0
if (key == GLUT_KEY_DOWN):
if (yMin[0] > -20):
yMin[0] = yMin[0] - 1.0
yMax[0] = yMax[0] - 1.0
if (key == GLUT_KEY_UP):
if (yMax[0] < 20):
yMin[0] = yMin[0] + 1.0
yMax[0] = yMax[0] + 1.0
glutPostRedisplay()
def main():
glutInit()
glutInitDisplayMode(GLUT_RGB )
glutInitWindowSize(width, height)
glutInitWindowPosition(0, 0)
glutCreateWindow("My First GL")
initialization()
glutDisplayFunc(draw)
glutKeyboardFunc(keyBoardEvent)
glutSpecialFunc(specialKeyEvent)
glutIdleFunc(MyIdle)
glutMainLoop()
if __name__ == '__main__':
main()
| gpl-3.0 | -4,649,586,430,156,827,000 | 19.325926 | 55 | 0.494898 | false |
otherness-space/myProject | my_project_001/lib/python2.7/site-packages/django/db/models/fields/__init__.py | 13 | 47949 | from __future__ import unicode_literals
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry, total_ordering
from django.utils.itercompat import is_iterator
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be prepended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
#   * name:      The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
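#
# Illustrative example (the model field below is hypothetical): for
#
#     user = models.ForeignKey(User, db_column="user_fk")
#
# name is "user", attname is "user_id", and column is "user_fk".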
@total_ordering
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in validators.EMPTY_VALUES:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if is_iterator(self._choices):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
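    # Illustrative example (the choice values are hypothetical): with
    # choices = [('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))), ('unknown', 'Unknown')],
    # flatchoices is [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')].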
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Automatic key")
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return connection.ops.value_to_db_auto(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn("DateTimeField received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, six.string_types) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length should be overridden to 254 characters to be fully
# compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, verbose_name, name, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value is None:
return value
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(validators.URLValidator())
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
| mit | 1,544,302,207,578,246,700 | 36.343458 | 83 | 0.59313 | false |
zaabjuda/django-vkontakte-groups-statistic | vkontakte_groups_statistic/migrations/0003_auto__add_field_groupstat_traffic_search_systems__add_field_groupstat_.py | 1 | 27133 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupStat.traffic_search_systems'
db.add_column('vkontakte_groups_groupstat', 'traffic_search_systems',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_external_sites'
db.add_column('vkontakte_groups_groupstat', 'traffic_external_sites',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_my_groups'
db.add_column('vkontakte_groups_groupstat', 'traffic_my_groups',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_news'
db.add_column('vkontakte_groups_groupstat', 'traffic_news',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_top'
db.add_column('vkontakte_groups_groupstat', 'traffic_top',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_search_results'
db.add_column('vkontakte_groups_groupstat', 'traffic_search_results',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_users'
db.add_column('vkontakte_groups_groupstat', 'traffic_users',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_groups'
db.add_column('vkontakte_groups_groupstat', 'traffic_groups',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_applications'
db.add_column('vkontakte_groups_groupstat', 'traffic_applications',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
# Adding field 'GroupStat.traffic_bookmarklets'
db.add_column('vkontakte_groups_groupstat', 'traffic_bookmarklets',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GroupStat.traffic_search_systems'
db.delete_column('vkontakte_groups_groupstat', 'traffic_search_systems')
# Deleting field 'GroupStat.traffic_external_sites'
db.delete_column('vkontakte_groups_groupstat', 'traffic_external_sites')
# Deleting field 'GroupStat.traffic_my_groups'
db.delete_column('vkontakte_groups_groupstat', 'traffic_my_groups')
# Deleting field 'GroupStat.traffic_news'
db.delete_column('vkontakte_groups_groupstat', 'traffic_news')
# Deleting field 'GroupStat.traffic_top'
db.delete_column('vkontakte_groups_groupstat', 'traffic_top')
# Deleting field 'GroupStat.traffic_search_results'
db.delete_column('vkontakte_groups_groupstat', 'traffic_search_results')
# Deleting field 'GroupStat.traffic_users'
db.delete_column('vkontakte_groups_groupstat', 'traffic_users')
# Deleting field 'GroupStat.traffic_groups'
db.delete_column('vkontakte_groups_groupstat', 'traffic_groups')
# Deleting field 'GroupStat.traffic_applications'
db.delete_column('vkontakte_groups_groupstat', 'traffic_applications')
# Deleting field 'GroupStat.traffic_bookmarklets'
db.delete_column('vkontakte_groups_groupstat', 'traffic_bookmarklets')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vkontakte_groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['vkontakte_users.User']", 'symmetrical': 'False'})
},
'vkontakte_groups_statistic.groupstat': {
'Meta': {'ordering': "('group', 'date')", 'unique_together': "(('group', 'date'),)", 'object_name': 'GroupStat', 'db_table': "'vkontakte_groups_groupstat'"},
'act_members': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'act_visitors': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_photo_comments': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_photos': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_topic_comments': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_topics': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_video_comments': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_videos': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'activity_wall': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'ads_members': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'ads_visitors': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_18': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_18_21': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_21_24': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_24_27': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_27_30': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_30_35': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_35_45': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_45': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'ex_members': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'females': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statistics'", 'to': "orm['vkontakte_groups.Group']"}),
'hidings': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'males': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'members': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'new_members': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_18': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_18_21': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_21_24': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_24_27': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_27_30': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_30_35': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_35_45': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_age_45': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_females': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_males': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reach_subsribers': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'references': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'section_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'section_audio': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'section_discussions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'section_documents': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'section_photoalbums': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'section_video': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'shares': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_bookmarklets': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_external_sites': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_groups': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_my_groups': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_news': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_search_results': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_search_systems': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_top': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'traffic_users': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'views': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'visitors': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'widget_ex_users': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'widget_members_views': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'widget_new_users': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'widget_users_views': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'vkontakte_groups_statistic.groupstatistic': {
'Meta': {'ordering': "('group', 'date')", 'unique_together': "(('group', 'date'),)", 'object_name': 'GroupStatistic', 'db_table': "'vkontakte_groups_groupstatistic'"},
'age_18': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_18_21': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_21_24': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_24_27': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_27_30': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_30_35': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_35_45': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'age_45': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'females': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statistics_api'", 'to': "orm['vkontakte_groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'males': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'views': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'visitors': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'vkontakte_groups_statistic.groupstatpercentage': {
'Meta': {'ordering': "('group', '-type', 'order')", 'unique_together': "(('group', 'type', 'value_type'),)", 'object_name': 'GroupStatPercentage', 'db_table': "'vkontakte_groups_groupstatpercentage'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'percentage_statistics'", 'to': "orm['vkontakte_groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'percents': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'value': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'value_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vkontakte_places.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': "orm['vkontakte_places.Country']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_places.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_users.user': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {}),
'activity': ('django.db.models.fields.TextField', [], {}),
'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'books': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': "orm['vkontakte_users.User']"}),
'games': ('django.db.models.fields.TextField', [], {}),
'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.TextField', [], {}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'movies': ('django.db.models.fields.TextField', [], {}),
'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'tv': ('django.db.models.fields.TextField', [], {}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'vkontakte_wall.comment': {
'Meta': {'ordering': "['post', '-date']", 'object_name': 'Comment'},
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'from_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_comments'", 'to': "orm['vkontakte_wall.Post']"}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'reply_for_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Comment']", 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_comments'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'vkontakte_wall.post': {
'Meta': {'ordering': "['wall_owner_id', '-date']", 'object_name': 'Post'},
'attachments': ('django.db.models.fields.TextField', [], {}),
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_posts'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'copy_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_copy_posts'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'copy_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'copy_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Post']", 'null': 'True'}),
'copy_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geo': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'like_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'media': ('django.db.models.fields.TextField', [], {}),
'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'post_source': ('django.db.models.fields.TextField', [], {}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'repost_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'repost_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'reposts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_posts'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['vkontakte_groups_statistic'] | bsd-3-clause | -3,150,960,748,678,707,000 | 79.041298 | 213 | 0.578742 | false |
ice9js/servo | tests/wpt/css-tests/css-fonts-3_dev/html/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
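# Each non-comment, non-empty line of gsubtest-features.txt is assumed to be
# tab-separated with an OpenType feature tag (e.g. "liga") in its first column;
# only that tag is kept below.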
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
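        # The single substitution below swaps each feature's .pass and .fail glyphs,
        # so the rendered glyph flips exactly when the feature is applied.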
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
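        # Only alternate N of "<tag>.altN" resolves to the PASS glyph; "<tag>.default"
        # and every other alternate choice resolve to FAIL (see the codepoint scheme
        # documented in makeJavascriptData below).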
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 | -6,827,572,882,166,484,000 | 28.063786 | 135 | 0.649628 | false |
tusharmakkar08/Diamond | src/diamond/handler/mysql.py | 3 | 2933 | # coding=utf-8
"""
Insert the collected values into a MySQL table
"""
from Handler import Handler
import MySQLdb
class MySQLHandler(Handler):
"""
Implements the abstract Handler class, sending data to a mysql table
"""
conn = None
def __init__(self, config=None):
"""
Create a new instance of the MySQLHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Options
self.hostname = self.config['hostname']
self.port = int(self.config['port'])
self.username = self.config['username']
self.password = self.config['password']
self.database = self.config['database']
self.table = self.config['table']
self.col_time = self.config['col_time']
self.col_metric = self.config['col_metric']
self.col_value = self.config['col_value']
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MySQLHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MySQLHandler, self).get_default_config()
config.update({
})
return config
def __del__(self):
"""
Destroy instance of the MySQLHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric
"""
# Just send the data
self._send(str(metric))
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
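        # The metric string is assumed to be "<path> <value> <timestamp>", so
        # data[0] is the metric name, data[1] the value and data[2] the timestamp;
        # the INSERT below maps them onto col_metric, col_time and col_value accordingly.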
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
except BaseException as e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
            # Attempt to re-establish the connection
self._connect()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
db=self.database)
def _close(self):
"""
Close the connection
"""
if self.conn:
self.conn.commit()
self.conn.close()
| mit | -2,602,224,973,308,052,500 | 25.908257 | 78 | 0.506308 | false |
timpalpant/calibre | src/calibre/gui2/actions/show_quickview.py | 14 | 3875 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QAction
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.quickview import Quickview
from calibre.gui2 import error_dialog
class ShowQuickviewAction(InterfaceAction):
name = 'Show Quickview'
action_spec = (_('Show Quickview'), 'search.png', None, _('Q'))
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
current_instance = None
def genesis(self):
self.qaction.triggered.connect(self.show_quickview)
self.focus_action = QAction(self.gui)
self.gui.addAction(self.focus_action)
self.gui.keyboard.register_shortcut('Focus To Quickview', _('Focus To Quickview'),
description=_('Move the focus to the Quickview pane/window'),
default_keys=('Shift+Q',), action=self.focus_action,
group=self.action_spec[0])
self.focus_action.triggered.connect(self.focus_quickview)
self.search_action = QAction(self.gui)
self.gui.addAction(self.search_action)
self.gui.keyboard.register_shortcut('Search from Quickview', _('Search from Quickview'),
description=_('Search for the currently selected Quickview item'),
default_keys=('Shift+S',), action=self.search_action,
group=self.action_spec[0])
self.search_action.triggered.connect(self.search_quickview)
self.search_action.changed.connect(self.set_search_shortcut)
self.menuless_qaction.changed.connect(self.set_search_shortcut)
def show_quickview(self, *args):
if self.current_instance:
if not self.current_instance.is_closed:
self.current_instance.reject()
self.current_instance = None
return
self.current_instance = None
if self.gui.current_view() is not self.gui.library_view:
error_dialog(self.gui, _('No quickview available'),
_('Quickview is not available for books '
'on the device.')).exec_()
return
index = self.gui.library_view.currentIndex()
if index.isValid():
self.current_instance = Quickview(self.gui, index)
self.current_instance.reopen_quickview.connect(self.reopen_quickview)
self.set_search_shortcut()
self.current_instance.show()
def set_search_shortcut(self):
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.set_shortcuts(self.search_action.shortcut().toString(),
self.menuless_qaction.shortcut().toString())
def reopen_quickview(self):
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.reject()
self.current_instance = None
self.show_quickview()
def change_quickview_column(self, idx):
self.show_quickview()
if self.current_instance:
if self.current_instance.is_closed:
return
self.current_instance.change_quickview_column.emit(idx)
def library_changed(self, db):
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.reject()
def focus_quickview(self):
if not (self.current_instance and not self.current_instance.is_closed):
self.show_quickview()
self.current_instance.set_focus()
def search_quickview(self):
if not self.current_instance or self.current_instance.is_closed:
return
self.current_instance.do_search()
| gpl-3.0 | 5,251,983,651,033,428,000 | 40.223404 | 96 | 0.633548 | false |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/test/test_decorators.py | 182 | 9828 | import unittest
from test import support
def funcattrs(**kwds):
def decorate(func):
func.__dict__.update(kwds)
return func
return decorate
class MiscDecorators (object):
@staticmethod
def author(name):
def decorate(func):
func.__dict__['author'] = name
return func
return decorate
# -----------------------------------------------
class DbcheckError (Exception):
def __init__(self, exprstr, func, args, kwds):
# A real version of this would set attributes here
Exception.__init__(self, "dbcheck %r failed (func=%s args=%s kwds=%s)" %
(exprstr, func, args, kwds))
def dbcheck(exprstr, globals=None, locals=None):
"Decorator to implement debugging assertions"
def decorate(func):
expr = compile(exprstr, "dbcheck-%s" % func.__name__, "eval")
def check(*args, **kwds):
if not eval(expr, globals, locals):
raise DbcheckError(exprstr, func, args, kwds)
return func(*args, **kwds)
return check
return decorate
# -----------------------------------------------
def countcalls(counts):
"Decorator to count calls to a function"
def decorate(func):
func_name = func.__name__
counts[func_name] = 0
def call(*args, **kwds):
counts[func_name] += 1
return func(*args, **kwds)
call.__name__ = func_name
return call
return decorate
# -----------------------------------------------
def memoize(func):
saved = {}
def call(*args):
try:
return saved[args]
except KeyError:
res = func(*args)
saved[args] = res
return res
except TypeError:
# Unhashable argument
return func(*args)
call.__name__ = func.__name__
return call
# -----------------------------------------------
class TestDecorators(unittest.TestCase):
def test_single(self):
class C(object):
@staticmethod
def foo(): return 42
self.assertEqual(C.foo(), 42)
self.assertEqual(C().foo(), 42)
def test_staticmethod_function(self):
@staticmethod
def notamethod(x):
return x
self.assertRaises(TypeError, notamethod, 1)
def test_dotted(self):
decorators = MiscDecorators()
@decorators.author('Cleese')
def foo(): return 42
self.assertEqual(foo(), 42)
self.assertEqual(foo.author, 'Cleese')
def test_argforms(self):
# A few tests of argument passing, as we use restricted form
# of expressions for decorators.
def noteargs(*args, **kwds):
def decorate(func):
setattr(func, 'dbval', (args, kwds))
return func
return decorate
args = ( 'Now', 'is', 'the', 'time' )
kwds = dict(one=1, two=2)
@noteargs(*args, **kwds)
def f1(): return 42
self.assertEqual(f1(), 42)
self.assertEqual(f1.dbval, (args, kwds))
@noteargs('terry', 'gilliam', eric='idle', john='cleese')
def f2(): return 84
self.assertEqual(f2(), 84)
self.assertEqual(f2.dbval, (('terry', 'gilliam'),
dict(eric='idle', john='cleese')))
@noteargs(1, 2,)
def f3(): pass
self.assertEqual(f3.dbval, ((1, 2), {}))
def test_dbcheck(self):
@dbcheck('args[1] is not None')
def f(a, b):
return a + b
self.assertEqual(f(1, 2), 3)
self.assertRaises(DbcheckError, f, 1, None)
def test_memoize(self):
counts = {}
@memoize
@countcalls(counts)
def double(x):
return x * 2
self.assertEqual(double.__name__, 'double')
self.assertEqual(counts, dict(double=0))
# Only the first call with a given argument bumps the call count:
#
self.assertEqual(double(2), 4)
self.assertEqual(counts['double'], 1)
self.assertEqual(double(2), 4)
self.assertEqual(counts['double'], 1)
self.assertEqual(double(3), 6)
self.assertEqual(counts['double'], 2)
# Unhashable arguments do not get memoized:
#
self.assertEqual(double([10]), [10, 10])
self.assertEqual(counts['double'], 3)
self.assertEqual(double([10]), [10, 10])
self.assertEqual(counts['double'], 4)
def test_errors(self):
# Test syntax restrictions - these are all compile-time errors:
#
for expr in [ "1+2", "x[3]", "(1, 2)" ]:
            # Sanity check: is expr a valid expression by itself?
compile(expr, "testexpr", "exec")
codestr = "@%s\ndef f(): pass" % expr
self.assertRaises(SyntaxError, compile, codestr, "test", "exec")
# You can't put multiple decorators on a single line:
#
self.assertRaises(SyntaxError, compile,
"@f1 @f2\ndef f(): pass", "test", "exec")
# Test runtime errors
def unimp(func):
raise NotImplementedError
context = dict(nullval=None, unimp=unimp)
for expr, exc in [ ("undef", NameError),
("nullval", TypeError),
("nullval.attr", AttributeError),
("unimp", NotImplementedError)]:
codestr = "@%s\ndef f(): pass\nassert f() is None" % expr
code = compile(codestr, "test", "exec")
self.assertRaises(exc, eval, code, context)
def test_double(self):
class C(object):
@funcattrs(abc=1, xyz="haha")
@funcattrs(booh=42)
def foo(self): return 42
self.assertEqual(C().foo(), 42)
self.assertEqual(C.foo.abc, 1)
self.assertEqual(C.foo.xyz, "haha")
self.assertEqual(C.foo.booh, 42)
def test_order(self):
# Test that decorators are applied in the proper order to the function
# they are decorating.
def callnum(num):
"""Decorator factory that returns a decorator that replaces the
passed-in function with one that returns the value of 'num'"""
def deco(func):
return lambda: num
return deco
@callnum(2)
@callnum(1)
def foo(): return 42
self.assertEqual(foo(), 2,
"Application order of decorators is incorrect")
def test_eval_order(self):
# Evaluating a decorated function involves four steps for each
# decorator-maker (the function that returns a decorator):
#
# 1: Evaluate the decorator-maker name
# 2: Evaluate the decorator-maker arguments (if any)
# 3: Call the decorator-maker to make a decorator
# 4: Call the decorator
#
# When there are multiple decorators, these steps should be
# performed in the above order for each decorator, but we should
# iterate through the decorators in the reverse of the order they
# appear in the source.
actions = []
def make_decorator(tag):
actions.append('makedec' + tag)
def decorate(func):
actions.append('calldec' + tag)
return func
return decorate
class NameLookupTracer (object):
def __init__(self, index):
self.index = index
def __getattr__(self, fname):
if fname == 'make_decorator':
opname, res = ('evalname', make_decorator)
elif fname == 'arg':
opname, res = ('evalargs', str(self.index))
else:
assert False, "Unknown attrname %s" % fname
actions.append('%s%d' % (opname, self.index))
return res
c1, c2, c3 = map(NameLookupTracer, [ 1, 2, 3 ])
expected_actions = [ 'evalname1', 'evalargs1', 'makedec1',
'evalname2', 'evalargs2', 'makedec2',
'evalname3', 'evalargs3', 'makedec3',
'calldec3', 'calldec2', 'calldec1' ]
actions = []
@c1.make_decorator(c1.arg)
@c2.make_decorator(c2.arg)
@c3.make_decorator(c3.arg)
def foo(): return 42
self.assertEqual(foo(), 42)
self.assertEqual(actions, expected_actions)
# Test the equivalence claim in chapter 7 of the reference manual.
#
actions = []
def bar(): return 42
bar = c1.make_decorator(c1.arg)(c2.make_decorator(c2.arg)(c3.make_decorator(c3.arg)(bar)))
self.assertEqual(bar(), 42)
self.assertEqual(actions, expected_actions)
class TestClassDecorators(unittest.TestCase):
def test_simple(self):
def plain(x):
x.extra = 'Hello'
return x
@plain
class C(object): pass
self.assertEqual(C.extra, 'Hello')
def test_double(self):
def ten(x):
x.extra = 10
return x
def add_five(x):
x.extra += 5
return x
@add_five
@ten
class C(object): pass
self.assertEqual(C.extra, 15)
def test_order(self):
def applied_first(x):
x.extra = 'first'
return x
def applied_second(x):
x.extra = 'second'
return x
@applied_second
@applied_first
class C(object): pass
self.assertEqual(C.extra, 'second')
def test_main():
support.run_unittest(TestDecorators)
support.run_unittest(TestClassDecorators)
if __name__=="__main__":
test_main()
| lgpl-3.0 | 2,148,118,671,515,291,600 | 30.805825 | 98 | 0.529406 | false |
trdean/grEME | grc/python/expr_utils.py | 7 | 5486 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import string
VAR_CHARS = string.letters + string.digits + '_'
class graph(object):
"""
Simple graph structure held in a dictionary.
"""
def __init__(self): self._graph = dict()
def __str__(self): return str(self._graph)
def add_node(self, node_key):
if self._graph.has_key(node_key): return
self._graph[node_key] = set()
def remove_node(self, node_key):
if not self._graph.has_key(node_key): return
for edges in self._graph.values():
if node_key in edges: edges.remove(node_key)
self._graph.pop(node_key)
def add_edge(self, src_node_key, dest_node_key):
self._graph[src_node_key].add(dest_node_key)
def remove_edge(self, src_node_key, dest_node_key):
self._graph[src_node_key].remove(dest_node_key)
def get_nodes(self): return self._graph.keys()
def get_edges(self, node_key): return self._graph[node_key]
def expr_split(expr):
"""
    Split up an expression on non-alphanumeric characters (underscore is treated
    as part of a token, like letters and digits). Leave quoted strings intact.
#TODO ignore escaped quotes, use raw strings.
Args:
expr: an expression string
Returns:
a list of string tokens that form expr
"""
toks = list()
tok = ''
quote = ''
for char in expr:
if quote or char in VAR_CHARS:
if char == quote: quote = ''
tok += char
elif char in ("'", '"'):
toks.append(tok)
tok = char
quote = char
else:
toks.append(tok)
toks.append(char)
tok = ''
toks.append(tok)
return filter(lambda t: t, toks)
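# Illustration (added for clarity, not part of the original module):
# expr_split("freq_0*2") yields ['freq_0', '*', '2'], while a quoted chunk such as
# "'a b'" is kept as a single token.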
def expr_replace(expr, replace_dict):
"""
Search for vars in the expression and add the prepend.
Args:
expr: an expression string
replace_dict: a dict of find:replace
Returns:
a new expression with the prepend
"""
expr_splits = expr_split(expr)
for i, es in enumerate(expr_splits):
if es in replace_dict.keys():
expr_splits[i] = replace_dict[es]
return ''.join(expr_splits)
def get_variable_dependencies(expr, vars):
"""
Return a set of variables used in this expression.
Args:
expr: an expression string
vars: a list of variable names
Returns:
a subset of vars used in the expression
"""
expr_toks = expr_split(expr)
return set(filter(lambda v: v in expr_toks, vars))
def get_graph(exprs):
"""
Get a graph representing the variable dependencies
Args:
exprs: a mapping of variable name to expression
Returns:
a graph of variable deps
"""
vars = exprs.keys()
#get dependencies for each expression, load into graph
var_graph = graph()
for var in vars: var_graph.add_node(var)
for var, expr in exprs.iteritems():
for dep in get_variable_dependencies(expr, vars):
if dep != var: var_graph.add_edge(dep, var)
return var_graph
def sort_variables(exprs):
"""
Get a list of variables in order of dependencies.
Args:
exprs: a mapping of variable name to expression
Returns:
a list of variable names
@throws Exception circular dependencies
"""
var_graph = get_graph(exprs)
sorted_vars = list()
#determine dependency order
while var_graph.get_nodes():
#get a list of nodes with no edges
indep_vars = filter(lambda var: not var_graph.get_edges(var), var_graph.get_nodes())
if not indep_vars: raise Exception('circular dependency caught in sort_variables')
#add the indep vars to the end of the list
sorted_vars.extend(sorted(indep_vars))
#remove each edge-less node from the graph
for var in indep_vars: var_graph.remove_node(var)
return reversed(sorted_vars)
def sort_objects(objects, get_id, get_expr):
"""
Sort a list of objects according to their expressions.
Args:
objects: the list of objects to sort
get_id: the function to extract an id from the object
get_expr: the function to extract an expression from the object
Returns:
a list of sorted objects
"""
id2obj = dict([(get_id(obj), obj) for obj in objects])
#map obj id to expression code
id2expr = dict([(get_id(obj), get_expr(obj)) for obj in objects])
#sort according to dependency
sorted_ids = sort_variables(id2expr)
#return list of sorted objects
return [id2obj[id] for id in sorted_ids]
if __name__ == '__main__':
for i in sort_variables({'x':'1', 'y':'x+1', 'a':'x+y', 'b':'y+1', 'c':'a+b+x+y'}): print i
| gpl-3.0 | 6,109,285,493,461,117,000 | 29.99435 | 95 | 0.633431 | false |
cryptobanana/ansible | lib/ansible/modules/network/cloudengine/ce_vxlan_arp.py | 22 | 24179 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
- Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
evn_bgp:
description:
- Enables EVN BGP.
required: false
choices: ['enable', 'disable']
default: null
evn_source_ip:
description:
- Specifies the source address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_peer_ip:
description:
- Specifies the IP address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_server:
description:
- Configures the local device as the router reflector (RR) on the EVN network.
required: false
choices: ['enable', 'disable']
default: null
evn_reflect_client:
description:
- Configures the local device as the route reflector (RR) and its peer as the client.
required: false
choices: ['enable', 'disable']
default: null
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
required: false
default: null
arp_collect_host:
description:
- Enables EVN BGP or BGP EVPN to collect host information.
required: false
choices: ['enable', 'disable']
default: null
host_collect_protocol:
description:
- Enables EVN BGP or BGP EVPN to advertise host information.
required: false
choices: ['bgp','none']
default: null
bridge_domain_id:
description:
- Specifies a BD(bridge domain) ID.
The value is an integer ranging from 1 to 16777215.
required: false
default: null
arp_suppress:
description:
- Enables ARP broadcast suppression in a BD.
required: false
choices: ['enable', 'disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan arp module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
ce_vxlan_arp:
evn_bgp: enable
evn_source_ip: 6.6.6.6
evn_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Configure a Layer 3 VXLAN gateway as a BGP RR.
ce_vxlan_arp:
evn_bgp: enable
evn_server: enable
provider: "{{ cli }}"
- name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
ce_vxlan_arp:
vbdif_name: Vbdif100
arp_collect_host: enable
provider: "{{ cli }}"
- name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
ce_vxlan_arp:
host_collect_protocol: bgp
provider: "{{ cli }}"
- name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
ce_vxlan_arp:
bridge_domain_id: 100
arp_suppress: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evn bgp",
"source-address 6.6.6.6",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_v4addr(addr):
"""check is ipv4 addr is valid"""
if addr.count('.') == 3:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
def get_evn_peers(config):
"""get evn peer ip list"""
get = re.findall(r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return list(set(get))
def get_evn_srouce(config):
"""get evn peer ip list"""
get = re.findall(
r"source-address ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return get[0]
def get_evn_reflect_client(config):
"""get evn reflect client list"""
get = re.findall(
r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s*reflect-client", config)
if not get:
return None
else:
return list(get)
class VxlanArp(object):
"""
Manages arp attributes of VXLAN.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.evn_bgp = self.module.params['evn_bgp']
self.evn_source_ip = self.module.params['evn_source_ip']
self.evn_peer_ip = self.module.params['evn_peer_ip']
self.evn_server = self.module.params['evn_server']
self.evn_reflect_client = self.module.params['evn_reflect_client']
self.vbdif_name = self.module.params['vbdif_name']
self.arp_collect_host = self.module.params['arp_collect_host']
self.host_collect_protocol = self.module.params[
'host_collect_protocol']
self.bridge_domain_id = self.module.params['bridge_domain_id']
self.arp_suppress = self.module.params['arp_suppress']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.config = "" # current config
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")]
self.module = AnsibleModule(argument_spec=self.spec,
required_together=required_together,
supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = "| ignore-case section include evn bgp|host collect protocol bgp"
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
if self.bridge_domain_id:
exp += "|^bridge-domain %s$" % self.bridge_domain_id
flags.append(exp)
config = get_config(self.module, flags)
return config
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_bridge_domain(self):
"""manage bridge domain configuration"""
if not self.bridge_domain_id:
return
# bridge-domain bd-id
# [undo] arp broadcast-suppress enable
cmd = "bridge-domain %s" % self.bridge_domain_id
if not is_config_exist(self.config, cmd):
self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)
cmd = "arp broadcast-suppress enable"
exist = is_config_exist(self.config, cmd)
if self.arp_suppress == "enable" and not exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_suppress == "disable" and exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_evn_bgp(self):
"""enables EVN BGP and configure evn bgp command"""
evn_bgp_view = False
evn_bgp_enable = False
cmd = "evn bgp"
exist = is_config_exist(self.config, cmd)
if self.evn_bgp == "enable" or exist:
evn_bgp_enable = True
# [undo] evn bgp
if self.evn_bgp:
if self.evn_bgp == "enable" and not exist:
self.cli_add_command(cmd)
evn_bgp_view = True
elif self.evn_bgp == "disable" and exist:
self.cli_add_command(cmd, undo=True)
return
# [undo] source-address ip-address
if evn_bgp_enable and self.evn_source_ip:
cmd = "source-address %s" % self.evn_source_ip
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
# [undo] peer ip-address
# [undo] peer ipv4-address reflect-client
if evn_bgp_enable and self.evn_peer_ip:
cmd = "peer %s" % self.evn_peer_ip
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
if self.evn_reflect_client == "enable":
self.cli_add_command(
"peer %s reflect-client" % self.evn_peer_ip)
else:
if self.evn_reflect_client:
cmd = "peer %s reflect-client" % self.evn_peer_ip
exist = is_config_exist(self.config, cmd)
if self.evn_reflect_client == "enable" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.evn_reflect_client == "disable" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
else:
if exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
# [undo] server enable
if evn_bgp_enable and self.evn_server:
cmd = "server enable"
exist = is_config_exist(self.config, cmd)
if self.evn_server == "enable" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.evn_server == "disable" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
if evn_bgp_view:
self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
# interface vbdif bd-id
# [undo] arp collect host enable
cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s does not exist." % self.vbdif_name)
cmd = "arp collect host enable"
exist = is_config_exist(self.config, cmd)
if self.arp_collect_host == "enable" and not exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_collect_host == "disable" and exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_host_collect_protocal(self):
"""Enable EVN BGP or BGP EVPN to advertise host information"""
# [undo] host collect protocol bgp
cmd = "host collect protocol bgp"
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if self.host_collect_protocol == "bgp" and not exist:
self.cli_add_command(cmd)
elif self.host_collect_protocol == "none" and exist:
self.cli_add_command(cmd, undo=True)
else:
if self.host_collect_protocol == "bgp" and exist:
self.cli_add_command(cmd, undo=True)
def is_valid_vbdif(self, ifname):
"""check is interface vbdif is valid"""
if not ifname.upper().startswith('VBDIF'):
return False
bdid = self.vbdif_name.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
def check_params(self):
"""Check all input params"""
# bridge domain id check
if self.bridge_domain_id:
if not self.bridge_domain_id.isdigit():
self.module.fail_json(
msg="Error: Bridge domain id is not digit.")
if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
self.module.fail_json(
msg="Error: Bridge domain id is not in the range from 1 to 16777215.")
# evn_source_ip check
if self.evn_source_ip:
if not is_valid_v4addr(self.evn_source_ip):
self.module.fail_json(msg="Error: evn_source_ip is invalid.")
# evn_peer_ip check
if self.evn_peer_ip:
if not is_valid_v4addr(self.evn_peer_ip):
self.module.fail_json(msg="Error: evn_peer_ip is invalid.")
# vbdif_name check
if self.vbdif_name:
self.vbdif_name = self.vbdif_name.replace(
" ", "").lower().capitalize()
if not self.is_valid_vbdif(self.vbdif_name):
self.module.fail_json(msg="Error: vbdif_name is invalid.")
# evn_reflect_client and evn_peer_ip must set at the same time
if self.evn_reflect_client and not self.evn_peer_ip:
self.module.fail_json(
msg="Error: evn_reflect_client and evn_peer_ip must set at the same time.")
# evn_server and evn_reflect_client can not set at the same time
if self.evn_server == "enable" and self.evn_reflect_client == "enable":
self.module.fail_json(
msg="Error: evn_server and evn_reflect_client can not set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.evn_bgp:
self.proposed["evn_bgp"] = self.evn_bgp
if self.evn_source_ip:
self.proposed["evn_source_ip"] = self.evn_source_ip
if self.evn_peer_ip:
self.proposed["evn_peer_ip"] = self.evn_peer_ip
if self.evn_server:
self.proposed["evn_server"] = self.evn_server
if self.evn_reflect_client:
self.proposed["evn_reflect_client"] = self.evn_reflect_client
if self.arp_collect_host:
self.proposed["arp_collect_host"] = self.arp_collect_host
if self.host_collect_protocol:
self.proposed["host_collect_protocol"] = self.host_collect_protocol
if self.arp_suppress:
self.proposed["arp_suppress"] = self.arp_suppress
if self.vbdif_name:
self.proposed["vbdif_name"] = self.evn_peer_ip
if self.bridge_domain_id:
self.proposed["bridge_domain_id"] = self.bridge_domain_id
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
evn_bgp_exist = is_config_exist(self.config, "evn bgp")
if evn_bgp_exist:
self.existing["evn_bgp"] = "enable"
else:
self.existing["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(self.config, "server enable"):
self.existing["evn_server"] = "enable"
else:
self.existing["evn_server"] = "disable"
self.existing["evn_source_ip"] = get_evn_srouce(self.config)
self.existing["evn_peer_ip"] = get_evn_peers(self.config)
self.existing["evn_reflect_client"] = get_evn_reflect_client(
self.config)
if is_config_exist(self.config, "arp collect host enable"):
self.existing["host_collect_protocol"] = "enable"
else:
self.existing["host_collect_protocol"] = "disable"
if is_config_exist(self.config, "host collect protocol bgp"):
self.existing["host_collect_protocol"] = "bgp"
else:
self.existing["host_collect_protocol"] = None
if is_config_exist(self.config, "arp broadcast-suppress enable"):
self.existing["arp_suppress"] = "enable"
else:
self.existing["arp_suppress"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
evn_bgp_exist = is_config_exist(config, "evn bgp")
if evn_bgp_exist:
self.end_state["evn_bgp"] = "enable"
else:
self.end_state["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(config, "server enable"):
self.end_state["evn_server"] = "enable"
else:
self.end_state["evn_server"] = "disable"
self.end_state["evn_source_ip"] = get_evn_srouce(config)
self.end_state["evn_peer_ip"] = get_evn_peers(config)
self.end_state[
"evn_reflect_client"] = get_evn_reflect_client(config)
if is_config_exist(config, "arp collect host enable"):
self.end_state["host_collect_protocol"] = "enable"
else:
self.end_state["host_collect_protocol"] = "disable"
if is_config_exist(config, "host collect protocol bgp"):
self.end_state["host_collect_protocol"] = "bgp"
else:
self.end_state["host_collect_protocol"] = None
if is_config_exist(config, "arp broadcast-suppress enable"):
self.end_state["arp_suppress"] = "enable"
else:
self.end_state["arp_suppress"] = "disable"
def work(self):
"""worker"""
self.check_params()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
self.config_evn_bgp()
if self.vbdif_name and self.arp_collect_host:
self.config_vbdif()
if self.host_collect_protocol:
self.config_host_collect_protocal()
if self.bridge_domain_id and self.arp_suppress:
self.config_bridge_domain()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
evn_bgp=dict(required=False, type='str',
choices=['enable', 'disable']),
evn_source_ip=dict(required=False, type='str'),
evn_peer_ip=dict(required=False, type='str'),
evn_server=dict(required=False, type='str',
choices=['enable', 'disable']),
evn_reflect_client=dict(
required=False, type='str', choices=['enable', 'disable']),
vbdif_name=dict(required=False, type='str'),
arp_collect_host=dict(required=False, type='str',
choices=['enable', 'disable']),
host_collect_protocol=dict(
required=False, type='str', choices=['bgp', 'none']),
bridge_domain_id=dict(required=False, type='str'),
arp_suppress=dict(required=False, type='str',
choices=['enable', 'disable']),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = VxlanArp(argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | -4,251,261,156,965,559,300 | 34.246356 | 106 | 0.563588 | false |
vipulroxx/sympy | sympy/liealgebras/type_e.py | 4 | 9860 | from sympy.core import Rational
from sympy.core.compatibility import range
from .cartan_type import Standard_Cartan
from sympy.matrices import eye
class TypeE(Standard_Cartan):
def __new__(cls, n):
if n < 6 or n > 8:
raise ValueError("Invalid value of n")
return Standard_Cartan.__new__(cls, "E", n)
def dimension(self):
"""
Returns the dimension of the vector space
V underlying the Lie algebra
Example
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("E6")
>>> c.dimension()
8
"""
return 8
def basic_root(self, i, j):
"""
This is a method just to generate roots
with a -1 in the ith position and a 1
in the jth postion.
"""
root = [0]*8
root[i] = -1
root[j] = 1
return root
def simple_root(self, i):
"""
Every lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
This method returns the ith simple root for E_n.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("E6")
>>> c.simple_root(2)
[1, 1, 0, 0, 0, 0, 0, 0]
"""
n = self.n
if i == 1:
root = [-0.5]*8
root[0] = 0.5
root[7] = 0.5
return root
elif i == 2:
root = [0]*8
root[1] = 1
root[0] = 1
return root
else:
if i == 7 or i == 8 and n == 6:
raise ValueError("E6 only has six simple roots!")
if i == 8 and n == 7:
raise ValueError("E7 has only 7 simple roots!")
return self.basic_root(i-3, i-2)
def positive_roots(self):
"""
This method generates all the positive roots of
A_n. This is half of all of the roots of E_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Example
======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
if n == 6:
posroots = {}
k = 0
for i in range(n-1):
for j in range(i+1, n-1):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
root = [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2),
Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
for a in range(0, 2):
for b in range(0, 2):
for c in range(0, 2):
for d in range(0, 2):
for e in range(0, 2):
if (a + b + c + d + e)%2 == 0:
k += 1
if a == 1:
root[0] = Rational(-1, 2)
if b == 1:
root[1] = Rational(-1, 2)
if c == 1:
root[2] = Rational(-1, 2)
if d == 1:
root[3] = Rational(-1, 2)
if e == 1:
root[4] = Rational(-1, 2)
posroots[k] = root
return posroots
if n == 7:
posroots = {}
k = 0
for i in range(n-1):
for j in range(i+1, n-1):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
k += 1
posroots[k] = [0, 0, 0, 0, 0, 1, 1, 0]
root = [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2),
Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
for a in range(0, 2):
for b in range(0, 2):
for c in range(0, 2):
for d in range(0, 2):
for e in range(0, 2):
for f in range(0, 2):
if (a + b + c + d + e + f)%2 == 0:
k += 1
if a == 1:
root[0] = Rational(-1, 2)
if b == 1:
root[1] = Rational(-1, 2)
if c == 1:
root[2] = Rational(-1, 2)
if d == 1:
root[3] = Rational(-1, 2)
if e == 1:
root[4] = Rational(-1, 2)
if f == 1:
root[5] = Rational(1, 2)
posroots[k] = root
return posroots
if n == 8:
posroots = {}
k = 0
for i in range(n):
for j in range(i+1, n):
k += 1
root = self.basic_root(i, j)
posroots[k] = root
k += 1
root = self.basic_root(i, j)
root[i] = 1
posroots[k] = root
root = [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2),
Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
for a in range(0, 2):
for b in range(0, 2):
for c in range(0, 2):
for d in range(0, 2):
for e in range(0, 2):
for f in range(0, 2):
for g in range(0, 2):
if (a + b + c + d + e + f + g)%2 == 0:
k += 1
if a == 1:
root[0] = Rational(-1, 2)
if b == 1:
root[1] = Rational(-1, 2)
if c == 1:
root[2] = Rational(-1, 2)
if d == 1:
root[3] = Rational(-1, 2)
if e == 1:
root[4] = Rational(-1, 2)
if f == 1:
root[5] = Rational(1, 2)
if g == 1:
root[6] = Rational(1, 2)
posroots[k] = root
return posroots
def roots(self):
"""
Returns the total number of roots of E_n
"""
n = self.n
if n == 6:
return 72
if n == 7:
return 126
if n == 8:
return 240
def cartan_matrix(self):
"""
Returns the Cartan matrix for G_2
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Example
=======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2*eye(n)
i = 3
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0, 2] = m[2, 0] = -1
m[1, 3] = m[3, 1] = -1
m[2, 3] = -1
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of E_n
"""
n = self.n
if n == 6:
return 78
if n == 7:
return 133
if n == 8:
return 248
def dynkin_diagram(self):
n = self.n
diag = " "*8 + str(2) + "\n"
diag += " "*8 + "0\n"
diag += " "*8 + "|\n"
diag += " "*8 + "|\n"
diag += "---".join("0" for i in range(1, n)) + "\n"
diag += "1 " + " ".join(str(i) for i in range(3, n+1))
return diag
| bsd-3-clause | 691,532,899,652,707,100 | 33.355401 | 99 | 0.341886 | false |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_11] Dr_Jung_Update/shared_constants.py | 1 | 3460 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 14 02:04:12 2014
@author: deokwoo
* Description
- This file defines constant values shared among python modules.
- Should be included all python modules first.
"""
###############################################################################
# Constant global variables
###############################################################################
"""
New format of timestamps stored in the bin files
[mt.str2datetime(tmp[0]), dt[5], dt[4], dt[3], dt[6], dt[2], dt[1]]
- str2datetime() converts the string representation of date "%Y-%m-%d %H:%M:%S" to Python datetime object (naive)
- dt[5], dt[4], dt[3], dt[6], dt[2], dt[1]: second, minute, hour, weekday, month day, month
E.g.: an item in the list of ts that represents "2013-4-1 22:43:16"
[datetime.datetime(2013, 4, 1, 22, 43, 16), 16, 43, 22, 0, 1, 4]
"""
from dateutil import tz
import multiprocessing
CPU_CORE_NUM = multiprocessing.cpu_count()
# Directory Information
#DATA_DIR='../data_year/'
MATOUT_DIR='./GSBC/'
VTT_out_dir='./VTT/'
#VTT_data_dir='../Database/VTT/Binfiles/'
#VTT_weather_dir='../Database/VTT/weather/VTT_'
VTT_data_dir='/adsc/bigdata/input_data/VTT/Binfiles/'
VTT_weather_dir='/adsc/bigdata/input_data/VTT/data_year/weather/VTT_'
# Getting weather data from locally stored bin files if True
USE_WEATHER_DATA_BIN=True
#PROC_OUT_DIR=VTT_out_dir
PROC_OUT_DIR='./proc_out/'
DATA_DIR=VTT_data_dir
WEATHER_DIR=VTT_weather_dir
fig_dir='./png_files/'
csv_dir='./csv_files/'
FL_EXT='.bin'
len_data_dir=len(DATA_DIR)
len_fl_ext=len(FL_EXT)
IS_USING_PARALLEL_OPT=False
IS_SAVING_DATA_DICT=True
# in seconds
MIN=60; HOUR=60*MIN; DAY=HOUR*24; MONTH=DAY*31
# State definition...
PEAK=1;LOW_PEAK=-1;NO_PEAK=0;
# mininum number of discrete values to be float type
MIN_NUM_VAL_FOR_FLOAT=10
# Data type
INT_TYPE=0
FLOAT_TYPE=1
# Hour, Weekday, Day, Month
SEC_IDX=1;MIN_IDX=2;HR_IDX=3; WD_IDX=4; MD_IDX=5 ;MN_IDX=6;DT_IDX=0;
hourDict={0:'0am',1:'1am',2:'2am',3:'3am',4:'4am',5:'5am'\
,6:'6am',7:'7am',8:'8am',9:'9am',10:'10am',11:'11am',12:'12pm',13:'1pm',14:'2pm'\
,15:'3pm',16:'4pm',17:'5pm',18:'6pm',19:'7pm',20:'8pm',21:'9pm',22:'10pm',23:'11pm'}
monthDict={0:'Jan', 1:'Feb', 2:'Mar', 3:'Apr', 4:'May', 5:'Jun', 6:'Jul', 7:'Aug', 8:'Sep', 9:'Oct', 10:'Nov', 11:'Dec'}
weekDict={0:'Mon', 1:'Tue', 2:'Wed', 3:'Thur', 4:'Fri', 5:'Sat', 6:'Sun'}
stateDict={-1:'Low Peak',0:'No Peak',1:'High Peak'}
#monthDict={'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7, 'Aug':8, 'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12}
#weekDict={'Mon':0, 'Tue':1, 'Wed':2, 'Thur':3, 'Fri':4, 'Sat':5, 'Sun':6}
Weekday=range(0,5)
Weekend=range(5,7)
Week=range(0,7)
# Hours parameters
am0=0;am1=1;am2=2;am3=3;am4=4;am5=5;am6=6;am7=7;am8=8;am9=9;am10=10;am11=11;
pm12=12;pm1=13;pm2=14;pm3=15;pm4=16;pm5=17;pm6=18;pm7=19;pm8=20;pm9=21;pm10=22;pm11=23;
# Week parameters
Mon=0;Tue=1;Wed=2;Thur=3;Fri=4;Sat=5;Sun=6
# Month parameters
Jan=0;Feb=1;Mar=2;Apr=3;May=4;Jun=5;Jul=6;Aug=7;Sep=8;Oct=9;Nov=10;Dec=11;
DayHours=range(24)
yearMonths=range(12)
# Define the period for analysis - year, month, day,hour
# Note: The sample data in the currently downloaded files are from 1 Apr 2013 to
# 30 Nov 2013.
# This is the best data set
#ANS_START_T=dt.datetime(2013,7,8,0)
#ANS_END_T=dt.datetime(2013,7,15,0)
# UTC time of weather data
from_zone = tz.gettz('UTC')
# VTT local time
#to_zone = tz.gettz('Europe/Helsinki')
to_zone = tz.gettz('Asia/Seoul') | gpl-2.0 | -4,760,140,127,857,824,000 | 33.959596 | 122 | 0.633526 | false |
Denisolt/IEEE-NYIT-MA | local/lib/python2.7/site-packages/django/db/models/sql/aggregates.py | 77 | 4846 | """
Classes to represent the default SQL aggregate functions
"""
import copy
import warnings
from django.db.models.fields import FloatField, IntegerField
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
__all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance']
warnings.warn(
"django.db.models.sql.aggregates is deprecated. Use "
"django.db.models.aggregates instead.",
RemovedInDjango110Warning, stacklevel=2)
class Aggregate(RegisterLookupMixin):
"""
Default SQL Aggregate.
"""
is_ordinal = False
is_computed = False
sql_template = '%(function)s(%(field)s)'
def __init__(self, col, source=None, is_summary=False, **extra):
"""Instantiate an SQL aggregate
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* sql_function, the name of the SQL function that implements the
aggregate.
* sql_template, a template string that is used to render the
aggregate into SQL.
* is_ordinal, a boolean indicating if the output of this aggregate
is an integer (e.g., a count)
* is_computed, a boolean indicating if this output of this aggregate
is a computed float (e.g., an average), regardless of the input
type.
"""
self.col = col
self.source = source
self.is_summary = is_summary
self.extra = extra
# Follow the chain of aggregate sources back until you find an
# actual field, or an aggregate that forces a particular output
# type. This type of this field will be used to coerce values
# retrieved from the database.
tmp = self
while tmp and isinstance(tmp, Aggregate):
if getattr(tmp, 'is_ordinal', False):
tmp = self._ordinal_aggregate_field
elif getattr(tmp, 'is_computed', False):
tmp = self._computed_aggregate_field
else:
tmp = tmp.source
self.field = tmp
# Two fake fields used to identify aggregate types in data-conversion operations.
@cached_property
def _ordinal_aggregate_field(self):
return IntegerField()
@cached_property
def _computed_aggregate_field(self):
return FloatField()
def relabeled_clone(self, change_map):
clone = copy.copy(self)
if isinstance(self.col, (list, tuple)):
clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
return clone
def as_sql(self, compiler, connection):
"Return the aggregate, rendered as SQL with parameters."
params = []
if hasattr(self.col, 'as_sql'):
field_name, params = self.col.as_sql(compiler, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join(compiler(c) for c in self.col)
else:
field_name = compiler(self.col)
substitutions = {
'function': self.sql_function,
'field': field_name
}
substitutions.update(self.extra)
return self.sql_template % substitutions, params
def get_group_by_cols(self):
return []
@property
def output_field(self):
return self.field
class Avg(Aggregate):
is_computed = True
sql_function = 'AVG'
class Count(Aggregate):
is_ordinal = True
sql_function = 'COUNT'
sql_template = '%(function)s(%(distinct)s%(field)s)'
def __init__(self, col, distinct=False, **extra):
super(Count, self).__init__(col, distinct='DISTINCT ' if distinct else '', **extra)
class Max(Aggregate):
sql_function = 'MAX'
class Min(Aggregate):
sql_function = 'MIN'
class StdDev(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(StdDev, self).__init__(col, **extra)
self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
class Sum(Aggregate):
sql_function = 'SUM'
class Variance(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(Variance, self).__init__(col, **extra)
self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP'
| gpl-3.0 | 1,755,255,112,814,422,300 | 30.264516 | 91 | 0.627115 | false |
TRESCLOUD/odoo | openerp/addons/base/ir/ir_ui_menu.py | 58 | 17374 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import re
import threading
from openerp.tools.safe_eval import safe_eval as eval
from openerp import tools
import openerp.modules
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
def one_in(setA, setB):
"""Check the presence of an element of setA in setB
"""
for x in setA:
if x in setB:
return True
return False
class ir_ui_menu(osv.osv):
_name = 'ir.ui.menu'
def __init__(self, *args, **kwargs):
self.cache_lock = threading.RLock()
self._cache = {}
super(ir_ui_menu, self).__init__(*args, **kwargs)
self.pool.get('ir.model.access').register_cache_clearing_method(self._name, 'clear_cache')
def clear_cache(self):
with self.cache_lock:
# radical but this doesn't frequently happen
if self._cache:
# Normally this is done by openerp.tools.ormcache
# but since we do not use it, set it by ourself.
self.pool._any_cache_cleared = True
self._cache = {}
def _filter_visible_menus(self, cr, uid, ids, context=None):
"""Filters the give menu ids to only keep the menu items that should be
visible in the menu hierarchy of the current user.
Uses a cache for speeding up the computation.
"""
with self.cache_lock:
modelaccess = self.pool.get('ir.model.access')
user_groups = set(self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['groups_id'])['groups_id'])
result = []
for menu in self.browse(cr, uid, ids, context=context):
# this key works because user access rights are all based on user's groups (cfr ir_model_access.check)
key = (cr.dbname, menu.id, tuple(user_groups))
if key in self._cache:
if self._cache[key]:
result.append(menu.id)
#elif not menu.groups_id and not menu.action:
# result.append(menu.id)
continue
self._cache[key] = False
if menu.groups_id:
restrict_to_groups = [g.id for g in menu.groups_id]
if not user_groups.intersection(restrict_to_groups):
continue
#result.append(menu.id)
#self._cache[key] = True
#continue
if menu.action:
# we check if the user has access to the action of the menu
data = menu.action
if data:
model_field = { 'ir.actions.act_window': 'res_model',
'ir.actions.report.xml': 'model',
'ir.actions.wizard': 'model',
'ir.actions.server': 'model_id',
}
field = model_field.get(menu.action._name)
if field and data[field]:
if not modelaccess.check(cr, uid, data[field], 'read', False):
continue
else:
# if there is no action, it's a 'folder' menu
if not menu.child_id:
# not displayed if there is no children
continue
result.append(menu.id)
self._cache[key] = True
return result
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
ids = super(ir_ui_menu, self).search(cr, uid, args, offset=0,
limit=None, order=order, context=context, count=False)
if not ids:
if count:
return 0
return []
# menu filtering is done only on main menu tree, not other menu lists
if context.get('ir.ui.menu.full_list'):
result = ids
else:
result = self._filter_visible_menus(cr, uid, ids, context=context)
if offset:
result = result[long(offset):]
if limit:
result = result[:long(limit)]
if count:
return len(result)
return result
def name_get(self, cr, uid, ids, context=None):
res = []
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context is None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id:
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + "/"
else:
parent_path = ''
return parent_path + elmt.name
def create(self, *args, **kwargs):
self.clear_cache()
return super(ir_ui_menu, self).create(*args, **kwargs)
def write(self, *args, **kwargs):
self.clear_cache()
return super(ir_ui_menu, self).write(*args, **kwargs)
def unlink(self, cr, uid, ids, context=None):
# Detach children and promote them to top-level, because it would be unwise to
# cascade-delete submenus blindly. We also can't use ondelete=set null because
# that is not supported when _parent_store is used (would silently corrupt it).
# TODO: ideally we should move them under a generic "Orphans" menu somewhere?
if isinstance(ids, (int, long)):
ids = [ids]
local_context = dict(context or {})
local_context['ir.ui.menu.full_list'] = True
direct_children_ids = self.search(cr, uid, [('parent_id', 'in', ids)], context=local_context)
if direct_children_ids:
self.write(cr, uid, direct_children_ids, {'parent_id': False})
result = super(ir_ui_menu, self).unlink(cr, uid, ids, context=context)
self.clear_cache()
return result
def copy(self, cr, uid, id, default=None, context=None):
ir_values_obj = self.pool.get('ir.values')
res = super(ir_ui_menu, self).copy(cr, uid, id, context=context)
datas=self.read(cr,uid,[res],['name'])[0]
rex=re.compile('\([0-9]+\)')
concat=rex.findall(datas['name'])
if concat:
next_num=int(concat[0])+1
datas['name']=rex.sub(('(%d)'%next_num),datas['name'])
else:
datas['name'] += '(1)'
self.write(cr,uid,[res],{'name':datas['name']})
ids = ir_values_obj.search(cr, uid, [
('model', '=', 'ir.ui.menu'),
('res_id', '=', id),
])
for iv in ir_values_obj.browse(cr, uid, ids):
ir_values_obj.copy(cr, uid, iv.id, default={'res_id': res},
context=context)
return res
def _action(self, cursor, user, ids, name, arg, context=None):
res = {}
ir_values_obj = self.pool.get('ir.values')
value_ids = ir_values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),
('key2', '=', 'tree_but_open'), ('res_id', 'in', ids)],
context=context)
values_action = {}
for value in ir_values_obj.browse(cursor, user, value_ids, context=context):
values_action[value.res_id] = value.value
for menu_id in ids:
res[menu_id] = values_action.get(menu_id, False)
return res
def _action_inv(self, cursor, user, menu_id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
ir_values_obj = self.pool.get('ir.values')
values_ids = ir_values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),
('key2', '=', 'tree_but_open'), ('res_id', '=', menu_id)],
context=context)
if value and values_ids:
ir_values_obj.write(cursor, user, values_ids, {'value': value}, context=ctx)
elif value:
# no values_ids, create binding
ir_values_obj.create(cursor, user, {
'name': 'Menuitem',
'model': self._name,
'value': value,
'key': 'action',
'key2': 'tree_but_open',
'res_id': menu_id,
}, context=ctx)
elif values_ids:
# value is False, remove existing binding
ir_values_obj.unlink(cursor, user, values_ids, context=ctx)
def _get_icon_pict(self, cr, uid, ids, name, args, context):
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = ('stock', (m.icon,'ICON_SIZE_MENU'))
return res
def onchange_icon(self, cr, uid, ids, icon):
if not icon:
return {}
return {'type': {'icon_pict': 'picture'}, 'value': {'icon_pict': ('stock', (icon,'ICON_SIZE_MENU'))}}
def read_image(self, path):
if not path:
return False
path_info = path.split(',')
icon_path = openerp.modules.get_module_resource(path_info[0],path_info[1])
icon_image = False
if icon_path:
try:
icon_file = tools.file_open(icon_path,'rb')
icon_image = base64.encodestring(icon_file.read())
finally:
icon_file.close()
return icon_image
def _get_image_icon(self, cr, uid, ids, names, args, context=None):
res = {}
for menu in self.browse(cr, uid, ids, context=context):
res[menu.id] = r = {}
for fn in names:
fn_src = fn[:-5] # remove _data
r[fn] = self.read_image(menu[fn_src])
return res
def _get_needaction_enabled(self, cr, uid, ids, field_names, args, context=None):
""" needaction_enabled: tell whether the menu has a related action
that uses the needaction mechanism. """
res = dict.fromkeys(ids, False)
for menu in self.browse(cr, uid, ids, context=context):
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
obj = self.pool.get(menu.action.res_model)
if obj and obj._needaction:
res[menu.id] = True
return res
def get_needaction_data(self, cr, uid, ids, context=None):
""" Return for each menu entry of ids :
- if it uses the needaction mechanism (needaction_enabled)
- the needaction counter of the related action, taking into account
the action domain
"""
if context is None:
context = {}
res = {}
menu_ids = set()
for menu in self.browse(cr, uid, ids, context=context):
menu_ids.add(menu.id)
ctx = None
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.context:
try:
# use magical UnquoteEvalContext to ignore undefined client-side variables such as `active_id`
eval_ctx = tools.UnquoteEvalContext(**context)
ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
except Exception:
# if the eval still fails for some reason, we'll simply skip this menu
pass
menu_ref = ctx and ctx.get('needaction_menu_ref')
if menu_ref:
if not isinstance(menu_ref, list):
menu_ref = [menu_ref]
model_data_obj = self.pool.get('ir.model.data')
for menu_data in menu_ref:
model, id = model_data_obj.get_object_reference(cr, uid, menu_data.split('.')[0], menu_data.split('.')[1])
if (model == 'ir.ui.menu'):
menu_ids.add(id)
menu_ids = list(menu_ids)
for menu in self.browse(cr, uid, menu_ids, context=context):
res[menu.id] = {
'needaction_enabled': False,
'needaction_counter': False,
}
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
obj = self.pool.get(menu.action.res_model)
if obj and obj._needaction:
if menu.action.type == 'ir.actions.act_window':
dom = menu.action.domain and eval(menu.action.domain, {'uid': uid}) or []
else:
dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
res[menu.id]['needaction_enabled'] = obj._needaction
res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=context)
return res
_columns = {
'name': fields.char('Menu', size=64, required=True, translate=True),
'sequence': fields.integer('Sequence'),
'child_id': fields.one2many('ir.ui.menu', 'parent_id', 'Child IDs'),
'parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', select=True, ondelete="restrict"),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
'groups_id': fields.many2many('res.groups', 'ir_ui_menu_group_rel',
'menu_id', 'gid', 'Groups', help="If you have groups, the visibility of this menu will be based on these groups. "\
"If this field is empty, OpenERP will compute visibility based on the related object's read access."),
'complete_name': fields.function(_get_full_name,
string='Full Path', type='char', size=128),
'icon': fields.selection(tools.icons, 'Icon', size=64),
'icon_pict': fields.function(_get_icon_pict, type='char', size=32),
'web_icon': fields.char('Web Icon File', size=128),
'web_icon_hover': fields.char('Web Icon File (hover)', size=128),
'web_icon_data': fields.function(_get_image_icon, string='Web Icon Image', type='binary', readonly=True, store=True, multi='icon'),
'web_icon_hover_data': fields.function(_get_image_icon, string='Web Icon Image (hover)', type='binary', readonly=True, store=True, multi='icon'),
'needaction_enabled': fields.function(_get_needaction_enabled,
type='boolean',
store=True,
string='Target model uses the need action mechanism',
help='If the menu entry action is an act_window action, and if this action is related to a model that uses the need_action mechanism, this field is set to true. Otherwise, it is false.'),
'action': fields.function(_action, fnct_inv=_action_inv,
type='reference', string='Action',
selection=[
('ir.actions.report.xml', 'ir.actions.report.xml'),
('ir.actions.act_window', 'ir.actions.act_window'),
('ir.actions.wizard', 'ir.actions.wizard'),
('ir.actions.act_url', 'ir.actions.act_url'),
('ir.actions.server', 'ir.actions.server'),
('ir.actions.client', 'ir.actions.client'),
]),
}
def _rec_message(self, cr, uid, ids, context=None):
return _('Error ! You can not create recursive Menu.')
_constraints = [
(osv.osv._check_recursion, _rec_message, ['parent_id'])
]
_defaults = {
'icon': 'STOCK_OPEN',
'icon_pict': ('stock', ('STOCK_OPEN', 'ICON_SIZE_MENU')),
'sequence': 10,
}
_order = "sequence,id"
_parent_store = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,871,050,278,813,709,000 | 43.321429 | 199 | 0.544262 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/offline_user_data_job_service/transports/grpc.py | 1 | 16240 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import offline_user_data_job
from google.ads.googleads.v7.services.types import offline_user_data_job_service
from google.longrunning import operations_pb2 # type: ignore
from .base import OfflineUserDataJobServiceTransport, DEFAULT_CLIENT_INFO
class OfflineUserDataJobServiceGrpcTransport(OfflineUserDataJobServiceTransport):
"""gRPC backend transport for OfflineUserDataJobService.
Service to manage offline user data jobs.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
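    A minimal construction sketch (hedged: it assumes application default
    credentials are available in the environment and uses only the
    constructor parameters documented on ``__init__`` below)::
        transport = OfflineUserDataJobServiceGrpcTransport(
            host="googleads.googleapis.com",
        )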
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
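        A hedged usage sketch (``my_credentials`` is an assumed, externally
        obtained credentials object; scopes fall back to ``cls.AUTH_SCOPES``)::
            channel = OfflineUserDataJobServiceGrpcTransport.create_channel(
                "googleads.googleapis.com",
                credentials=my_credentials,
            )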
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
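        A hedged sketch of the caching behaviour described above (both
        reads return the same ``OperationsClient`` instance)::
            ops_a = transport.operations_client
            ops_b = transport.operations_client
            assert ops_a is ops_b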
"""
# Sanity check: Only create a new client if we do not already have one.
if 'operations_client' not in self.__dict__:
self.__dict__['operations_client'] = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self.__dict__['operations_client']
@property
def create_offline_user_data_job(self) -> Callable[
[offline_user_data_job_service.CreateOfflineUserDataJobRequest],
offline_user_data_job_service.CreateOfflineUserDataJobResponse]:
r"""Return a callable for the create offline user data job method over gRPC.
Creates an offline user data job.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__
`NotAllowlistedError <>`__ `OfflineUserDataJobError <>`__
`QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.CreateOfflineUserDataJobRequest],
~.CreateOfflineUserDataJobResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_offline_user_data_job' not in self._stubs:
self._stubs['create_offline_user_data_job'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.OfflineUserDataJobService/CreateOfflineUserDataJob',
request_serializer=offline_user_data_job_service.CreateOfflineUserDataJobRequest.serialize,
response_deserializer=offline_user_data_job_service.CreateOfflineUserDataJobResponse.deserialize,
)
return self._stubs['create_offline_user_data_job']
@property
def get_offline_user_data_job(self) -> Callable[
[offline_user_data_job_service.GetOfflineUserDataJobRequest],
offline_user_data_job.OfflineUserDataJob]:
r"""Return a callable for the get offline user data job method over gRPC.
Returns the offline user data job.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetOfflineUserDataJobRequest],
~.OfflineUserDataJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_offline_user_data_job' not in self._stubs:
self._stubs['get_offline_user_data_job'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.OfflineUserDataJobService/GetOfflineUserDataJob',
request_serializer=offline_user_data_job_service.GetOfflineUserDataJobRequest.serialize,
response_deserializer=offline_user_data_job.OfflineUserDataJob.deserialize,
)
return self._stubs['get_offline_user_data_job']
@property
def add_offline_user_data_job_operations(self) -> Callable[
[offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest],
offline_user_data_job_service.AddOfflineUserDataJobOperationsResponse]:
r"""Return a callable for the add offline user data job
operations method over gRPC.
Adds operations to the offline user data job.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__ `MutateError <>`__
`OfflineUserDataJobError <>`__ `QuotaError <>`__
`RequestError <>`__
Returns:
Callable[[~.AddOfflineUserDataJobOperationsRequest],
~.AddOfflineUserDataJobOperationsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'add_offline_user_data_job_operations' not in self._stubs:
self._stubs['add_offline_user_data_job_operations'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.OfflineUserDataJobService/AddOfflineUserDataJobOperations',
request_serializer=offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest.serialize,
response_deserializer=offline_user_data_job_service.AddOfflineUserDataJobOperationsResponse.deserialize,
)
return self._stubs['add_offline_user_data_job_operations']
@property
def run_offline_user_data_job(self) -> Callable[
[offline_user_data_job_service.RunOfflineUserDataJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the run offline user data job method over gRPC.
Runs the offline user data job.
When finished, the long running operation will contain the
processing result or failure information, if any.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__
`HeaderError <>`__ `InternalError <>`__
`OfflineUserDataJobError <>`__ `QuotaError <>`__
`RequestError <>`__
Returns:
Callable[[~.RunOfflineUserDataJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'run_offline_user_data_job' not in self._stubs:
self._stubs['run_offline_user_data_job'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.OfflineUserDataJobService/RunOfflineUserDataJob',
request_serializer=offline_user_data_job_service.RunOfflineUserDataJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['run_offline_user_data_job']
__all__ = (
'OfflineUserDataJobServiceGrpcTransport',
)
| apache-2.0 | -5,768,659,833,520,609,000 | 45.267806 | 120 | 0.623214 | false |
aurarad/auroracoin | test/functional/feature_dersig.py | 1 | 6371 | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
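# Worked example of the transformation above (byte values are illustrative,
# not taken from a real transaction): a strictly DER-encoded signature push
#   30 44 02 20 <R> 02 20 <S> 01
# becomes
#   30 44 02 20 <R> 02 20 <S> 00 01
# A zero byte is inserted before the trailing sighash-type byte, so the blob
# no longer matches its DER length prefix while the script still parses.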
class BIP66Test(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-par=1', '-enablebip61']] # Use only one script thread to get the exact reject reason for testing
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_dersig_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip66'],
{
"active": is_active,
"height": DERSIG_HEIGHT,
"type": "buried",
},
)
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.test_dersig_info(is_active=False)
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.test_dersig_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
self.nodes[0].p2p.send_and_ping(msg_block(block))
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]):
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
self.nodes[0].p2p.sync_with_ping()
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputs on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-1].hash)]):
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
self.nodes[0].p2p.sync_with_ping()
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
self.nodes[0].p2p.send_and_ping(msg_block(block))
self.test_dersig_info(is_active=True) # Active as of current tip
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
| mit | 4,940,157,119,081,665,000 | 41.473333 | 183 | 0.648407 | false |
bbangert/velruse | velruse/providers/yahoo.py | 3 | 3776 | from __future__ import absolute_import
from openid.extensions import ax
import requests
from requests_oauthlib import OAuth1
from pyramid.security import NO_PERMISSION_REQUIRED
from ..api import register_provider
from ..compat import parse_qsl
from .oid_extensions import OAuthRequest
from .openid import (
OpenIDAuthenticationComplete,
OpenIDConsumer,
)
log = __import__('logging').getLogger(__name__)
YAHOO_OAUTH = 'https://api.login.yahoo.com/oauth/v2/get_token'
class YahooAuthenticationComplete(OpenIDAuthenticationComplete):
"""Yahoo auth complete"""
def includeme(config):
config.add_directive('add_yahoo_login', add_yahoo_login)
def add_yahoo_login(config,
realm=None,
storage=None,
consumer_key=None,
consumer_secret=None,
login_path='/login/yahoo',
callback_path='/login/yahoo/callback',
name='yahoo'):
"""
Add a Yahoo login provider to the application.
OpenID parameters: realm, storage
OAuth parameters: consumer_key, consumer_secret
"""
provider = YahooConsumer(name, realm, storage,
consumer_key, consumer_secret)
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
register_provider(config, name, provider)
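# Illustrative Pyramid wiring (a sketch; the realm, key and secret values are
# placeholders, not real credentials):
#
#   config.include('velruse.providers.yahoo')
#   config.add_yahoo_login(realm='https://example.com/',
#                          consumer_key='KEY',
#                          consumer_secret='SECRET')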
class YahooConsumer(OpenIDConsumer):
def __init__(self, name, realm=None, storage=None,
oauth_key=None, oauth_secret=None):
"""Handle Yahoo Auth
This also handles making an OAuth request during the OpenID
authentication.
"""
OpenIDConsumer.__init__(self, name, 'yahoo', realm, storage,
context=YahooAuthenticationComplete)
self.oauth_key = oauth_key
self.oauth_secret = oauth_secret
def _lookup_identifier(self, request, identifier):
"""Return the Yahoo OpenID directed endpoint"""
return 'https://me.yahoo.com/'
def _update_authrequest(self, request, authrequest):
# Add on the Attribute Exchange for those that support that
ax_request = ax.FetchRequest()
for attrib in ['http://axschema.org/namePerson/friendly',
'http://axschema.org/namePerson',
'http://axschema.org/person/gender',
'http://axschema.org/pref/timezone',
'http://axschema.org/media/image/default',
'http://axschema.org/contact/email']:
ax_request.add(ax.AttrInfo(attrib))
authrequest.addExtension(ax_request)
# Add OAuth request?
if 'oauth' in request.POST:
oauth_request = OAuthRequest(consumer=self.oauth_key)
authrequest.addExtension(oauth_request)
def _get_access_token(self, request_token):
oauth = OAuth1(
self.oauth_key,
client_secret=self.oauth_secret,
resource_owner_key=request_token)
resp = requests.post(YAHOO_OAUTH, auth=oauth)
if resp.status_code != 200:
log.error(
'OAuth token validation failed. Status: %d, Content: %s',
resp.status_code, resp.content)
else:
access_token = dict(parse_qsl(resp.text))
return {
'oauthAccessToken': access_token['oauth_token'],
'oauthAccessTokenSecret': access_token['oauth_token_secret'],
}
| mit | 4,376,776,380,193,603,600 | 32.122807 | 77 | 0.605932 | false |
tow/dnspython | dns/namedict.py | 99 | 2106 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS name dictionary"""
import dns.name
class NameDict(dict):
"""A dictionary whose keys are dns.name.Name objects.
@ivar max_depth: the maximum depth of the keys that have ever been
added to the dictionary.
@type max_depth: int
"""
def __init__(self, *args, **kwargs):
super(NameDict, self).__init__(*args, **kwargs)
self.max_depth = 0
def __setitem__(self, key, value):
if not isinstance(key, dns.name.Name):
raise ValueError('NameDict key must be a name')
depth = len(key)
if depth > self.max_depth:
self.max_depth = depth
super(NameDict, self).__setitem__(key, value)
def get_deepest_match(self, name):
"""Find the deepest match to I{name} in the dictionary.
The deepest match is the longest name in the dictionary which is
a superdomain of I{name}.
@param name: the name
@type name: dns.name.Name object
@rtype: (key, value) tuple
"""
depth = len(name)
if depth > self.max_depth:
depth = self.max_depth
for i in xrange(-depth, 0):
n = dns.name.Name(name[i:])
if self.has_key(n):
return (n, self[n])
v = self[dns.name.empty]
return (dns.name.empty, v)
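# Illustrative usage (a sketch, not part of the module):
#
#   d = NameDict()
#   d[dns.name.from_text('example.com.')] = 'zone'
#   d[dns.name.empty] = 'root'
#   d.get_deepest_match(dns.name.from_text('www.example.com.'))
#   # -> (the example.com. name, 'zone')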
| isc | -5,176,188,284,169,628,000 | 34.694915 | 72 | 0.648623 | false |
ninuxorg/netengine | netengine/backends/ssh/airos.py | 1 | 5763 | from cached_property import cached_property
from netengine.backends.ssh import SSH
__all__ = ['AirOS']
class AirOS(SSH):
"""
Ubiquiti AirOS SSH backend
Version 5.5.8
"""
def __str__(self):
""" print a human readable object description """
        return u"<SSH (Ubiquiti AirOS): %s@%s>" % (self.username, self.host)
@cached_property
def _ubntbox(self):
"""
returns "ubntbox mca-status" output in a python dictionary
"""
output = self.run('ubntbox mca-status')
info = {}
# loop over output
for line in output.split('\r\n'):
parts = line.split('=')
# main device info
if len(parts) > 2:
subparts = line.split(',')
for subpart in subparts:
key, value = subpart.split('=')
info[key] = value
# all other stuff
elif len(parts) == 2:
info[parts[0]] = parts[1]
else:
pass
# return dictionary
return info
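    # The mapping above typically looks something like this; the values are
    # purely illustrative, only the keys used elsewhere in this class are
    # assumed to be present:
    #
    #   {'firmwareVersion': 'XM.v5.5.8', 'platform': 'NanoStation M5',
    #    'memTotal': '64', 'uptime': '123456', 'lanSpeed': '100Mbps/Full',
    #    'wlanOpmode': 'ap', 'freq': '5745', 'signal': '-61', 'noise': '-95'}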
@cached_property
def _systemcfg(self):
"""
return main system configuration in a python dictionary
"""
output = self.run('cat /tmp/system.cfg')
info = {}
# parse lines
for line in output.split('\n'):
parts = line.split('=')
            # only keep well-formed key=value lines
            if len(parts) == 2:
                # store the value keyed by its full option name, e.g. "radio.1.txpower"
                info[parts[0]] = parts[1]
# return dictionary
return info
@property
def os(self):
""" get OS string, return tuple with OS name and OS version """
return ('AirOS', self._ubntbox['firmwareVersion'])
@property
def name(self):
""" get device name """
return self.run('uname -a').split(' ')[1]
@property
def model(self):
""" get device model name, eg: Nanostation M5, Rocket M5 """
return self._ubntbox['platform']
@property
def RAM_total(self):
return int(self._ubntbox['memTotal'])
@property
def uptime(self):
return int(self._ubntbox['uptime'])
@property
def ethernet_standard(self):
""" determine ethernet standard """
if '100Mbps' in self._ubntbox['lanSpeed']:
return 'fast'
elif '1000Mbps' in self._ubntbox['lanSpeed']:
return 'gigabit'
elif '10Mbps' in self._ubntbox['lanSpeed']:
return 'legacy'
else:
return None
@property
def ethernet_duplex(self):
""" determine if ethernet interface is full-duplex or not """
if 'Full' in self._ubntbox['lanSpeed']:
return 'full'
elif 'Half' in self._ubntbox['lanSpeed']:
return 'half'
@property
def wireless_channel_width(self):
""" retrieve wireless channel width """
if '20' in self._systemcfg['radio.1.ieee_mode']:
return 20
elif '40' in self._systemcfg['radio.1.ieee_mode']:
return 40
else:
return None
@property
def wireless_mode(self):
""" retrieve wireless mode (AP/STA) """
return self._ubntbox['wlanOpmode']
@property
def wireless_channel(self):
""" retrieve wireless channel / frequency """
return self._ubntbox['freq']
@property
def wireless_output_power(self):
""" retrieve output power """
return int(self._systemcfg['radio.1.txpower'])
@property
def wireless_dbm(self):
""" get dbm """
return self._ubntbox['signal']
@property
def wireless_noise(self):
""" retrieve noise """
return self._ubntbox['noise']
def _filter_interfaces(self):
"""
        Return the interfaces reported by ifconfig, tagging each one as
        "ethernet" or "wireless" and merging in the wireless details from iwconfig.
"""
wireless_interfaces = self.iwconfig()
interfaces = self.ifconfig()
results = []
# loop over interfaces
for interface in interfaces:
# is it Ethernet?
if 'eth' in interface['name']:
interface['type'] = 'ethernet'
# is it Wireless?
elif 'wlan' in interface['name'] or 'ath' in interface['name']:
interface['type'] = 'wireless'
interface['wireless'] = {
"channel": self.wireless_channel,
"channel_width": self.wireless_channel_width,
"mode": self.wireless_mode,
"tx_power": self.wireless_output_power,
"dbm": self.wireless_dbm,
"noise": self.wireless_noise
}
# merge with iwconfig
for wireless_if in wireless_interfaces:
if wireless_if['name'] == interface['name']:
interface['wireless'].update(wireless_if['wireless'])
# append result to list of interfaces
results.append(interface)
# return results
return results
def _filter_routing_protocols(self):
results = []
if self.olsr:
results.append(self._dict({
"name": "olsr",
"version": self.olsr[0]
}))
# other routing protocols
return results
def to_dict(self):
return self._dict({
"name": self.name,
"type": "radio",
"os": self.os[0],
"os_version": self.os[1],
"manufacturer": "Ubiquiti Networks, INC",
"model": self.model,
"RAM_total": self.RAM_total,
"uptime": self.uptime,
"interfaces": self._filter_interfaces(),
"routing_protocols": self._filter_routing_protocols()
})
| mit | -3,169,432,203,465,434,000 | 29.015625 | 77 | 0.516051 | false |
jiumx60rus/grishyGhost | node_modules/nodegit/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 388 | 47235 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
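# Worked examples of the quoting above (illustrative, not an exhaustive spec
# of CommandLineToArgvW):
#   foo bar    ->  "foo bar"
#   say "hi"   ->  "say \"hi\""   (embedded quotes get backslash-escaped)
#   100%       ->  "100%%"        (percents doubled so cmd keeps them literal)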
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
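# Example (illustrative): EncodeRspFileList(['cl.exe', '/c', 'foo bar.cc'])
# yields 'cl.exe "/c" "foo bar.cc"'; the command itself is left unquoted and
# every following argument goes through QuoteForRspFile.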
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
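# Examples (illustrative):
#   _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'b'])  -> 1
#   _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'x'])  -> None (the default)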
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
  def GetMapFileName(self, config, expand_special):
    """Gets the explicitly overridden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
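# Example (illustrative): with an expansions dict containing
# {'$(ProjectName)': 'base'}, ExpandMacros('$(ProjectName).ninja', expansions)
# returns 'base.ninja'.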
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
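# Example of the expected behaviour (annotation, not in the original source):
# given 'set' output containing lines such as
#   Path=C:\VS\bin;C:\Windows
#   INCLUDE=C:\VS\include
# this returns {'PATH': '<dir of sys.executable>;C:\\VS\\bin;C:\\Windows',
#               'INCLUDE': 'C:\\VS\\include', ...}, with keys upper-cased and
# the running Python's directory prepended to PATH.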
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
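# Illustrative sketch (annotation, not in the original source): for
# {'TEMP': r'C:\tmp', 'TMP': r'C:\tmp'} the returned block is
# 'TEMP=C:\\tmp\x00TMP=C:\\tmp\x00\x00' -- NUL-terminated key=value pairs plus
# a trailing NUL, the layout CreateProcess expects for its environment argument.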
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
meet your requirement (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to the gyp to suppress file
generation and use custom environment files prepared by yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
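# Annotation (not in the original source): in the normal case this writes
# <toplevel_build_dir>/environment.x86 and environment.x64 (raw environment
# blocks consumed via gyp-win-tool) and returns a dict such as
# {'x86': 'C:\\...\\cl.exe', 'x64': 'C:\\...\\cl.exe'}; when
# ninja_use_custom_environment_files is set, nothing is written and plain
# 'cl.exe' is returned for both architectures.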
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| mit | -3,786,306,663,090,686,000 | 42.857939 | 80 | 0.657817 | false |
alexston/calibre-webserver | src/odf/meta.py | 98 | 2136 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import METANS
from element import Element
# Autogenerated
def AutoReload(**args):
return Element(qname = (METANS,'auto-reload'), **args)
def CreationDate(**args):
return Element(qname = (METANS,'creation-date'), **args)
def DateString(**args):
return Element(qname = (METANS,'date-string'), **args)
def DocumentStatistic(**args):
return Element(qname = (METANS,'document-statistic'), **args)
def EditingCycles(**args):
return Element(qname = (METANS,'editing-cycles'), **args)
def EditingDuration(**args):
return Element(qname = (METANS,'editing-duration'), **args)
def Generator(**args):
return Element(qname = (METANS,'generator'), **args)
def HyperlinkBehaviour(**args):
return Element(qname = (METANS,'hyperlink-behaviour'), **args)
def InitialCreator(**args):
return Element(qname = (METANS,'initial-creator'), **args)
def Keyword(**args):
return Element(qname = (METANS,'keyword'), **args)
def PrintDate(**args):
return Element(qname = (METANS,'print-date'), **args)
def PrintedBy(**args):
return Element(qname = (METANS,'printed-by'), **args)
def Template(**args):
return Element(qname = (METANS,'template'), **args)
def UserDefined(**args):
return Element(qname = (METANS,'user-defined'), **args)
| gpl-3.0 | 5,097,257,572,925,608,000 | 31.348485 | 80 | 0.708665 | false |
ecederstrand/django | tests/template_tests/syntax_tests/test_if.py | 73 | 23743 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import TestObj, setup
class IfTagTests(SimpleTestCase):
@setup({'if-tag01': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag01(self):
output = self.engine.render_to_string('if-tag01', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag02': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag02(self):
output = self.engine.render_to_string('if-tag02', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag03': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag03(self):
output = self.engine.render_to_string('if-tag03')
self.assertEqual(output, 'no')
@setup({'if-tag04': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag04(self):
output = self.engine.render_to_string('if-tag04', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag05': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag05(self):
output = self.engine.render_to_string('if-tag05', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag06': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag06(self):
output = self.engine.render_to_string('if-tag06')
self.assertEqual(output, '')
@setup({'if-tag07': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag07(self):
output = self.engine.render_to_string('if-tag07', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag08': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag08(self):
output = self.engine.render_to_string('if-tag08', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag09': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag09(self):
output = self.engine.render_to_string('if-tag09')
self.assertEqual(output, 'nothing')
@setup({'if-tag10': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag10(self):
output = self.engine.render_to_string('if-tag10', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag11': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag11(self):
output = self.engine.render_to_string('if-tag11', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag12': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag12(self):
output = self.engine.render_to_string('if-tag12', {'baz': True})
self.assertEqual(output, 'baz')
@setup({'if-tag13': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag13(self):
output = self.engine.render_to_string('if-tag13')
self.assertEqual(output, 'nothing')
# Filters
@setup({'if-tag-filter01': '{% if foo|length == 5 %}yes{% else %}no{% endif %}'})
def test_if_tag_filter01(self):
output = self.engine.render_to_string('if-tag-filter01', {'foo': 'abcde'})
self.assertEqual(output, 'yes')
@setup({'if-tag-filter02': '{% if foo|upper == \'ABC\' %}yes{% else %}no{% endif %}'})
def test_if_tag_filter02(self):
output = self.engine.render_to_string('if-tag-filter02')
self.assertEqual(output, 'no')
# Equality
@setup({'if-tag-eq01': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq01(self):
output = self.engine.render_to_string('if-tag-eq01')
self.assertEqual(output, 'yes')
@setup({'if-tag-eq02': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq02(self):
output = self.engine.render_to_string('if-tag-eq02', {'foo': 1})
self.assertEqual(output, 'no')
@setup({'if-tag-eq03': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq03(self):
output = self.engine.render_to_string('if-tag-eq03', {'foo': 1, 'bar': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-eq04': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq04(self):
output = self.engine.render_to_string('if-tag-eq04', {'foo': 1, 'bar': 2})
self.assertEqual(output, 'no')
@setup({'if-tag-eq05': '{% if foo == \'\' %}yes{% else %}no{% endif %}'})
def test_if_tag_eq05(self):
output = self.engine.render_to_string('if-tag-eq05')
self.assertEqual(output, 'no')
# Comparison
@setup({'if-tag-gt-01': '{% if 2 > 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gt_01(self):
output = self.engine.render_to_string('if-tag-gt-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-gt-02': '{% if 1 > 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gt_02(self):
output = self.engine.render_to_string('if-tag-gt-02')
self.assertEqual(output, 'no')
@setup({'if-tag-gte-01': '{% if 1 >= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gte_01(self):
output = self.engine.render_to_string('if-tag-gte-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-gte-02': '{% if 1 >= 2 %}yes{% else %}no{% endif %}'})
def test_if_tag_gte_02(self):
output = self.engine.render_to_string('if-tag-gte-02')
self.assertEqual(output, 'no')
@setup({'if-tag-lt-01': '{% if 1 < 2 %}yes{% else %}no{% endif %}'})
def test_if_tag_lt_01(self):
output = self.engine.render_to_string('if-tag-lt-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-lt-02': '{% if 1 < 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lt_02(self):
output = self.engine.render_to_string('if-tag-lt-02')
self.assertEqual(output, 'no')
@setup({'if-tag-lte-01': '{% if 1 <= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lte_01(self):
output = self.engine.render_to_string('if-tag-lte-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-lte-02': '{% if 2 <= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lte_02(self):
output = self.engine.render_to_string('if-tag-lte-02')
self.assertEqual(output, 'no')
# Contains
@setup({'if-tag-in-01': '{% if 1 in x %}yes{% else %}no{% endif %}'})
def test_if_tag_in_01(self):
output = self.engine.render_to_string('if-tag-in-01', {'x': [1]})
self.assertEqual(output, 'yes')
@setup({'if-tag-in-02': '{% if 2 in x %}yes{% else %}no{% endif %}'})
def test_if_tag_in_02(self):
output = self.engine.render_to_string('if-tag-in-02', {'x': [1]})
self.assertEqual(output, 'no')
@setup({'if-tag-not-in-01': '{% if 1 not in x %}yes{% else %}no{% endif %}'})
def test_if_tag_not_in_01(self):
output = self.engine.render_to_string('if-tag-not-in-01', {'x': [1]})
self.assertEqual(output, 'no')
@setup({'if-tag-not-in-02': '{% if 2 not in x %}yes{% else %}no{% endif %}'})
def test_if_tag_not_in_02(self):
output = self.engine.render_to_string('if-tag-not-in-02', {'x': [1]})
self.assertEqual(output, 'yes')
# AND
@setup({'if-tag-and01': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and01(self):
output = self.engine.render_to_string('if-tag-and01', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-and02': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and02(self):
output = self.engine.render_to_string('if-tag-and02', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and03': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and03(self):
output = self.engine.render_to_string('if-tag-and03', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-and04': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and04(self):
output = self.engine.render_to_string('if-tag-and04', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and05': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and05(self):
output = self.engine.render_to_string('if-tag-and05', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and06': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and06(self):
output = self.engine.render_to_string('if-tag-and06', {'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and07': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and07(self):
output = self.engine.render_to_string('if-tag-and07', {'foo': True})
self.assertEqual(output, 'no')
@setup({'if-tag-and08': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and08(self):
output = self.engine.render_to_string('if-tag-and08', {'bar': True})
self.assertEqual(output, 'no')
# OR
@setup({'if-tag-or01': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or01(self):
output = self.engine.render_to_string('if-tag-or01', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or02': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or02(self):
output = self.engine.render_to_string('if-tag-or02', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-or03': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or03(self):
output = self.engine.render_to_string('if-tag-or03', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or04': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or04(self):
output = self.engine.render_to_string('if-tag-or04', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or05': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or05(self):
output = self.engine.render_to_string('if-tag-or05', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or06': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or06(self):
output = self.engine.render_to_string('if-tag-or06', {'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or07': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or07(self):
output = self.engine.render_to_string('if-tag-or07', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or08': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or08(self):
output = self.engine.render_to_string('if-tag-or08', {'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or09': '{% if foo or bar or baz %}yes{% else %}no{% endif %}'})
def test_if_tag_or09(self):
"""
multiple ORs
"""
output = self.engine.render_to_string('if-tag-or09', {'baz': True})
self.assertEqual(output, 'yes')
# NOT
@setup({'if-tag-not01': '{% if not foo %}no{% else %}yes{% endif %}'})
def test_if_tag_not01(self):
output = self.engine.render_to_string('if-tag-not01', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not02': '{% if not not foo %}no{% else %}yes{% endif %}'})
def test_if_tag_not02(self):
output = self.engine.render_to_string('if-tag-not02', {'foo': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not06': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not06(self):
output = self.engine.render_to_string('if-tag-not06')
self.assertEqual(output, 'no')
@setup({'if-tag-not07': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not07(self):
output = self.engine.render_to_string('if-tag-not07', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not08': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not08(self):
output = self.engine.render_to_string('if-tag-not08', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not09': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not09(self):
output = self.engine.render_to_string('if-tag-not09', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not10': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not10(self):
output = self.engine.render_to_string('if-tag-not10', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not11': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not11(self):
output = self.engine.render_to_string('if-tag-not11')
self.assertEqual(output, 'no')
@setup({'if-tag-not12': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not12(self):
output = self.engine.render_to_string('if-tag-not12', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not13': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not13(self):
output = self.engine.render_to_string('if-tag-not13', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not14': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not14(self):
output = self.engine.render_to_string('if-tag-not14', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not15': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not15(self):
output = self.engine.render_to_string('if-tag-not15', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not16': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not16(self):
output = self.engine.render_to_string('if-tag-not16')
self.assertEqual(output, 'yes')
@setup({'if-tag-not17': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not17(self):
output = self.engine.render_to_string('if-tag-not17', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not18': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not18(self):
output = self.engine.render_to_string('if-tag-not18', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not19': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not19(self):
output = self.engine.render_to_string('if-tag-not19', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not20': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not20(self):
output = self.engine.render_to_string('if-tag-not20', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not21': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not21(self):
output = self.engine.render_to_string('if-tag-not21')
self.assertEqual(output, 'yes')
@setup({'if-tag-not22': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not22(self):
output = self.engine.render_to_string('if-tag-not22', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not23': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not23(self):
output = self.engine.render_to_string('if-tag-not23', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not24': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not24(self):
output = self.engine.render_to_string('if-tag-not24', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not25': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not25(self):
output = self.engine.render_to_string('if-tag-not25', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not26': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not26(self):
output = self.engine.render_to_string('if-tag-not26')
self.assertEqual(output, 'yes')
@setup({'if-tag-not27': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not27(self):
output = self.engine.render_to_string('if-tag-not27', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not28': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not28(self):
output = self.engine.render_to_string('if-tag-not28', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not29': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not29(self):
output = self.engine.render_to_string('if-tag-not29', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not30': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not30(self):
output = self.engine.render_to_string('if-tag-not30', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not31': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not31(self):
output = self.engine.render_to_string('if-tag-not31')
self.assertEqual(output, 'yes')
@setup({'if-tag-not32': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not32(self):
output = self.engine.render_to_string('if-tag-not32', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not33': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not33(self):
output = self.engine.render_to_string('if-tag-not33', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not34': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not34(self):
output = self.engine.render_to_string('if-tag-not34', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not35': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not35(self):
output = self.engine.render_to_string('if-tag-not35', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
# Various syntax errors
@setup({'if-tag-error01': '{% if %}yes{% endif %}'})
def test_if_tag_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error01')
@setup({'if-tag-error02': '{% if foo and %}yes{% else %}no{% endif %}'})
def test_if_tag_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error02', {'foo': True})
@setup({'if-tag-error03': '{% if foo or %}yes{% else %}no{% endif %}'})
def test_if_tag_error03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error03', {'foo': True})
@setup({'if-tag-error04': '{% if not foo and %}yes{% else %}no{% endif %}'})
def test_if_tag_error04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error04', {'foo': True})
@setup({'if-tag-error05': '{% if not foo or %}yes{% else %}no{% endif %}'})
def test_if_tag_error05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error05', {'foo': True})
@setup({'if-tag-error06': '{% if abc def %}yes{% endif %}'})
def test_if_tag_error06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error06')
@setup({'if-tag-error07': '{% if not %}yes{% endif %}'})
def test_if_tag_error07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error07')
@setup({'if-tag-error08': '{% if and %}yes{% endif %}'})
def test_if_tag_error08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error08')
@setup({'if-tag-error09': '{% if or %}yes{% endif %}'})
def test_if_tag_error09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error09')
@setup({'if-tag-error10': '{% if == %}yes{% endif %}'})
def test_if_tag_error10(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error10')
@setup({'if-tag-error11': '{% if 1 == %}yes{% endif %}'})
def test_if_tag_error11(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error11')
@setup({'if-tag-error12': '{% if a not b %}yes{% endif %}'})
def test_if_tag_error12(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error12')
@setup({'if-tag-shortcircuit01': '{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}'})
def test_if_tag_shortcircuit01(self):
"""
If-expression evaluation is short-circuited where possible
"""
output = self.engine.render_to_string('if-tag-shortcircuit01', {'x': TestObj()})
self.assertEqual(output, 'yes')
@setup({'if-tag-shortcircuit02': '{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}'})
def test_if_tag_shortcircuit02(self):
"""
The is_bad() function should not be evaluated. If it is, an
exception is raised.
"""
output = self.engine.render_to_string('if-tag-shortcircuit02', {'x': TestObj()})
self.assertEqual(output, 'no')
@setup({'if-tag-badarg01': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg01(self):
"""
Non-existent args
"""
output = self.engine.render_to_string('if-tag-badarg01')
self.assertEqual(output, '')
@setup({'if-tag-badarg02': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg02(self):
output = self.engine.render_to_string('if-tag-badarg02', {'y': 0})
self.assertEqual(output, '')
@setup({'if-tag-badarg03': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg03(self):
output = self.engine.render_to_string('if-tag-badarg03', {'y': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-badarg04': '{% if x|default_if_none:y %}yes{% else %}no{% endif %}'})
def test_if_tag_badarg04(self):
output = self.engine.render_to_string('if-tag-badarg04')
self.assertEqual(output, 'no')
@setup({'if-tag-single-eq': '{% if foo = bar %}yes{% else %}no{% endif %}'})
def test_if_tag_single_eq(self):
# A single equals sign is a syntax error.
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-single-eq', {'foo': 1})
| bsd-3-clause | 886,110,714,126,005,500 | 43.882798 | 105 | 0.574696 | false |
RenqinCai/python_dataset | LR/shuffle_v8.py | 2 | 13109 | ###new function:shuffling the reviewing time and debug the program
###R is set to a constant value
import simplejson as json
import datetime
import time
import numpy as np
import math
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from dateutil.relativedelta import *
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
def string_toDatetime(string):
return datetime.datetime.strptime(string, "%Y-%m-%d")
def string_toYear(string):
return datetime.datetime.strptime(string[0:4], "%Y").date()
def string_toYearMonth(string):
return datetime.datetime.strptime(string[0:7], "%Y-%m").date()
def monthDiff(timeDate1, timeDate2):
return (timeDate1.year-timeDate2.year)*12 + (timeDate1.month-timeDate2.month)
def yearDiff(timeDate1, timeDate2):
return (timeDate1.year-timeDate2)
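# Annotation (not in the original script): monthDiff returns a signed month
# difference, e.g. monthDiff(date(2014, 3, 1), date(2014, 1, 1)) == 2, while
# yearDiff expects timeDate2 to be a plain integer year rather than a date.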
def betweenTime(timeDate, downTime, upTime):
if ((monthDiff(timeDate, downTime) < 0)or(monthDiff(upTime, timeDate) < 0)):
return False
else:
return True
#####
###userInfo is a dict keyed by user id:
###userInfo {user:{"sinceTime":sinceTime, "reviewTime":reviewTime, "active":0 or 1, "friends":[]}}
###reviewTime is the first time the user reviewed the business.
###For different businesses, reviewTime and active take different values.
###timeUserData maps a signup month to the users who joined then: {time:[user]}
##########
def loadUser():
userInfo = {}
timeUserData = {}
defaultReviewTime = string_toYearMonth('2015-01')
defaultActive = 0
userSet = set()
userSum = 0
userFile = "../../dataset/user.json"
with open(userFile) as f:
for line in f:
userJson = json.loads(line)
user=userJson["user"]
friend = userJson["friends"]
sinceTime = string_toYearMonth(userJson["sinceTime"])
userInfo.setdefault(user, {})
userInfo[user]["sinceTime"] = sinceTime
userInfo[user]["reviewTime"] = defaultReviewTime
userInfo[user]["active"] = defaultActive
userInfo[user]["friends"] = []
timeUserData.setdefault(sinceTime, [])
timeUserData[sinceTime].append(user)
if friend:
for f in friend:
userInfo[user]["friends"].append(f)
userSum += 1
userSet.add(user)
userList = list(userSet)
print "load Friend"
print "total userSum %d"%userSum
return (userInfo, timeUserData, userSum, userList)
####loadReview builds reviewData in the format {business:{reviewTime:[user]}}
####and stores the review count per business as reviewSum: {business:businessSum}
###it also stores timeReviewerDict_allBiz as {time:[user]}
def loadReview():
reviewData = {}
reviewSum = {}
timeReviewerDict_allBiz = {}
reviewFile = "../../dataset/review.json"
with open(reviewFile) as f:
for line in f:
reviewJson = json.loads(line)
business = reviewJson["business"]
user = reviewJson["user"]
reviewTime = string_toYearMonth(reviewJson["date"])
reviewData.setdefault(business, {})
reviewData[business].setdefault(reviewTime, [])
reviewData[business][reviewTime].append(user)
timeReviewerDict_allBiz.setdefault(reviewTime, [])
timeReviewerDict_allBiz[reviewTime].append(user)
reviewSum.setdefault(business, 0)
reviewSum[business] += 1
return (reviewData, reviewSum, timeReviewerDict_allBiz)
###keep only businesses with more than 50 reviews in the period
####reviewList contains these businesses
def filterReviewData(reviewData, reviewSum):
print "review process"
reviewSet = set()
for business in reviewSum.keys():
bNum = reviewSum[business]
if bNum > 50:
reviewSet.add(business)
reviewList = list(reviewSet)
# finalBusinessList = list(finalBusinessSet)
print "end process"
return (reviewList)
####randomBusiness returns selectBusiness, a list of the randomly selected index numbers
def randomBusiness(totalNum, randomNum):
business = [i for i in range(totalNum)]
selectBusiness = []
for i in range(randomNum):
k = np.random.randint(0, totalNum-i)+i
temp = business[i]
business[i] = business[k]
business[k] = temp
selectBusiness.append(business[i])
return selectBusiness
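# Annotation (not in the original script): randomBusiness is a partial
# Fisher-Yates shuffle -- for each of the first randomNum positions it swaps in
# a uniformly chosen later element, so it returns randomNum distinct indices
# drawn without replacement from range(totalNum).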
#####map the index numbers in selectBusiness (a list) back to business ids
###and store the business ids in selectBusinessList
def randomSelectBusiness(reviewList, selectBusinessNum):
businessSet = set(reviewList)
businessLen = len(businessSet)
if businessLen < selectBusinessNum:
selectBusinessList = reviewList
else:
selectBusiness = randomBusiness(businessLen, selectBusinessNum)
selectBusinessList = [reviewList[i] for i in selectBusiness]
return selectBusinessList
def increMonth(baseMonth):
return baseMonth+relativedelta(months=+1)
###take an 18-month window of the dict, starting at the first review month, and sort it by time
def SortDict_Time(timeValDict, userInfo):
sortedTimeValDict = {}
timeList = sorted(timeValDict)
timeSet = set(timeList)
timeUserDict_oneBiz = {}##{time:[user]}
periodList = []
timeUserLenDict = {}
WList_oneBiz = [] ##w in the paper
tempWList_oneBiz = []
WSet_oneBiz = set()
monthRange = 18
if(monthRange > len(timeList)):
monthRange = len(timeList)
monthTime = timeList[0]
for i in range(monthRange):
periodList.append(monthTime)
if monthTime in timeSet:
sortedTimeValDict.setdefault(monthTime, [])
timeUserLenDict.setdefault(monthTime, 0)
reviewUserList = timeValDict[monthTime]
reviewUserSet = set(reviewUserList)
reviewUserSet = reviewUserSet.difference(WSet_oneBiz)
reviewUserList = list(reviewUserSet)
sortedTimeValDict[monthTime] = reviewUserList
timeUserLenDict[monthTime] = len(reviewUserList)
WSet_oneBiz = WSet_oneBiz.union(reviewUserSet)
monthTime = increMonth(monthTime)
WList_oneBiz = list(WSet_oneBiz)
tempWList_oneBiz = list(WSet_oneBiz)
for t in periodList:
for user in tempWList_oneBiz:
uSinceTime = userInfo[user]["sinceTime"]
if (monthDiff(uSinceTime, t)<=0):
timeUserDict_oneBiz.setdefault(t, [])
timeUserDict_oneBiz[t].append(user)
tempWList_oneBiz.remove(user)
return (sortedTimeValDict, periodList, WList_oneBiz, timeUserDict_oneBiz, timeUserLenDict)
###update the userInfo: "reviewTime", "active" for a business
def UpdateUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz, selectBusiness):
repeatReviewUserSet = set()
for t in timeReviewerDict_oneBiz.keys():
reviewUserList = timeReviewerDict_oneBiz[t]
reviewUserSet = set(reviewUserList)
for u in reviewUserSet:
preActive = userInfo[u]["active"]
if(preActive == 1):
continue
else:
userInfo[u]["active"] = 1
userInfo[u]["reviewTime"] = t
##timeReviewerDict_oneBiz {time:[reviewer]}
def ResetUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz):
defaultReviewTime = string_toYearMonth('2015-01')
defaultActive = 0
for t in timeReviewerDict_oneBiz.keys():
reviewUserSet = set(timeReviewerDict_oneBiz[t])
for u in reviewUserSet:
userInfo[u]["reviewTime"] = defaultReviewTime
userInfo[u]["active"] = defaultActive
def UpdateTimeReviewer_allBiz(reviewData, selectBusiness, timeReviewerDict_oneBiz):
for t in timeReviewerDict_oneBiz.keys():
reviewUserList = timeReviewerDict_oneBiz[t]
reviewData[selectBusiness][t] = reviewUserList
def ResetTimeReviewer_allBiz(reviewData, selectBusiness, timeReviewerDict_oneBiz):
for t in timeReviewerDict_oneBiz.keys():
reviewUserList = timeReviewerDict_oneBiz[t]
reviewData[selectBusiness][t] = reviewUserList
def compute_oneBiz(userInfo, selectBusiness, reviewData):
timereviewerDict_allBiz = dict(reviewData)
reviewDict_oneBiz = timereviewerDict_allBiz[selectBusiness]
(timeReviewerDict_oneBiz, periodList, WList_oneBiz, timeUserDict_oneBiz, timeUserLenDict) = SortDict_Time(reviewDict_oneBiz, userInfo)
###before permute
UpdateUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz, selectBusiness)
(LR_coef, LR_intercept) = LR_oneBiz(periodList, userInfo, timereviewerDict_allBiz)
ResetUserInfo_oneBiz(userInfo, timeReviewerDict_oneBiz)
###permuteTime
permute_timeReviewerDict_oneBiz = permuteTime(timeReviewerDict_oneBiz, timeUserDict_oneBiz, periodList, timeUserLenDict)
UpdateUserInfo_oneBiz(userInfo, permute_timeReviewerDict_oneBiz, selectBusiness)
UpdateTimeReviewer_allBiz(timereviewerDict_allBiz, selectBusiness, permute_timeReviewerDict_oneBiz)
(LR_coef2, LR_intercept2) = LR_oneBiz(periodList, userInfo, timereviewerDict_allBiz)
ResetUserInfo_oneBiz(userInfo, permute_timeReviewerDict_oneBiz)
ResetTimeReviewer_allBiz(timereviewerDict_allBiz, selectBusiness, timeReviewerDict_oneBiz)
return (LR_coef, LR_coef2)
def LR_oneBiz(periodList, userInfo, reviewData):
R = 10
Y = [0 for i in range(R+2)]
N = [0 for i in range(R+2)]
feature = []
output = []
activeZeroSum = 0
unactiveZeroSum = 0
positive = 0
negative = 0
totalReviewUserSet = set()
for t in periodList:
#print t
activeUserSet = set()
reviewUserSet = set()
raw_reviewUserSet = set()
###bug fix: rebuild the month's reviewer set from reviewData each iteration (previously reviewUserList_oneBiz was not being updated)
for b in reviewData.keys():
if(reviewData[b].has_key(t)):
raw_reviewUserSet = raw_reviewUserSet.union(set(reviewData[b][t]))
reviewUserSet = raw_reviewUserSet
totalReviewUserSet=totalReviewUserSet.union(reviewUserSet)
for u in totalReviewUserSet:
uReviewTime = userInfo[u]["reviewTime"]
uActive = userInfo[u]["active"]
if(uActive == 1):
if (uReviewTime == t):
uActiveFriendSum = activeFriend_Sum(u, userInfo, t)
output.append(uActive)
positive += 1
if(uActiveFriendSum == 0):
activeZeroSum += 1
if uActiveFriendSum > R:
feature.append(R+1)
Y[R+1] += 1
else:
feature.append(uActiveFriendSum)
Y[uActiveFriendSum] += 1
activeUserSet.add(u)
else:
negative += 1
uActiveFriendSum = activeFriend_Sum(u, userInfo, t)
output.append(uActive)
if(uActiveFriendSum == 0):
unactiveZeroSum += 1
if uActiveFriendSum > R:
feature.append(R+1)
N[R+1] += 1
else:
feature.append(uActiveFriendSum)
N[uActiveFriendSum] += 1
totalReviewUserSet = totalReviewUserSet.difference(activeUserSet)
#print "positive %d negative %d"%(positive, negative)
(LR_coef, LR_intercept) = LR_result(feature, output)
return (LR_coef, LR_intercept)
def LR_result(x, y):
#print x
model = LogisticRegression()
x_feature = [[math.log(i+1)] for i in x]
model = model.fit(x_feature, y)
print model.score(x_feature, y)
return (model.coef_, model.intercept_)
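# Annotation (not in the original script): the regression uses a single feature
# per observation, log(1 + active_friend_count), so model.coef_ estimates how
# the log-count of already-active friends shifts the log-odds of a user
# becoming active; compute_oneBiz compares this coefficient before and after
# the review times are shuffled.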
def activeFriend_Sum(user, userInfo, uReviewTime):
friendList = userInfo[user]["friends"]
friendSet = set(friendList)
activeFriendSum = 0
friendLen = len(friendSet)
for f in friendSet:
fActive = userInfo[f]["active"]
if (fActive == 0):
continue
fReviewTime = userInfo[f]["reviewTime"]
if(monthDiff(fReviewTime, uReviewTime)<0):
activeFriendSum += 1
#print "active%d"%activeFriendSum
return activeFriendSum
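# Annotation (not in the original script): a friend is only counted as an
# active friend if it became active strictly before the month in question
# (monthDiff < 0), so same-month reviews do not count as prior exposure.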
def compute_oneBiz_helper(args):
return compute_oneBiz(*args)
def permuteTime(timeReviewerDict_oneBiz, timeUserDict_oneBiz, periodList, timeUserLenDict):
permute_timeReviewerDict_oneBiz = {}
totalSinceUserSet = set()
for t in periodList:
##todo
selectReviewerSum = 0
if timeUserLenDict.has_key(t):
selectReviewerSum = timeUserLenDict[t]
sinceUserSet = set()
if timeUserDict_oneBiz.has_key(t):
sinceUserList = timeUserDict_oneBiz[t]
sinceUserSet = set(sinceUserList)
totalSinceUserSet = totalSinceUserSet.union(sinceUserSet)
selectUserList = randomSelectBusiness(list(totalSinceUserSet), selectReviewerSum)
selectUserSet = set(selectUserList)
permute_timeReviewerDict_oneBiz.setdefault(t, [])
permute_timeReviewerDict_oneBiz[t] = selectUserList
totalSinceUserSet = totalSinceUserSet.difference(selectUserSet)
return permute_timeReviewerDict_oneBiz
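# Annotation (not in the original script): permuteTime keeps the per-month
# adoption counts (timeUserLenDict) but redraws which users adopted in each
# month, sampling uniformly from users whose accounts already existed by that
# month; this shuffled timeline is the null model that the second regression
# (LR_coef2) is fit on.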
def mainFunction():
f1_result = open("coef1_result.txt", "w")
f2_result = open("coef2_result.txt", "w")
(userInfo, timeUserData, userSum, userList) = loadUser()
(reviewData, reviewSum, timeReviewUser) = loadReview()
(reviewList) = filterReviewData(reviewData, reviewSum)
selectBusinessNum = 1
selectBusinessList = randomSelectBusiness(reviewList, selectBusinessNum)
selectBusinessSet = set(selectBusinessList)
beginTime = datetime.datetime.now()
positiveCoef = 0
negativeCoef = 0
results=[]
pool_args = [(userInfo, i, reviewData) for i in selectBusinessSet]
pool = ThreadPool(8)
results = pool.map(compute_oneBiz_helper, pool_args)
# results = []
# for i in range(selectBusinessNum):
# selectBusiness = selectBusinessList[i]
# reviewData_allBiz = dict(reviewData)
# (LR_coef, LR_coef2) = compute_oneBiz(userInfo, selectBusiness, reviewData_allBiz)
# results.append((LR_coef, LR_coef2))
for (LR_coef, LR_coef2) in results:
f1_result.write("%s\n"%LR_coef)
f2_result.write("%s\n"%LR_coef2)
endTime = datetime.datetime.now()
timeIntervals = (endTime-beginTime).seconds
print "time interval %s"%timeIntervals
f1_result.write("time interval %s"%timeIntervals)
f2_result.write("time interval %s"%timeIntervals)
f1_result.close()
f2_result.close()
mainFunction()
| gpl-2.0 | 7,831,754,431,706,486,000 | 28.131111 | 135 | 0.731101 | false |
kohnle-lernmodule/exeLearningPlus1_04 | twisted/internet/posixbase.py | 14 | 15431 | # -*- test-case-name: twisted.test.test_internet -*-
# $Id: default.py,v 1.90 2004/01/06 22:35:22 warner Exp $
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Posix reactor base class
API Stability: stable
Maintainer: U{Itamar Shtull-Trauring<mailto:[email protected]>}
"""
import warnings
import socket
import errno
import os
from zope.interface import implements, classImplements
from twisted.internet.interfaces import IReactorUNIX, IReactorUNIXDatagram
from twisted.internet.interfaces import IReactorTCP, IReactorUDP, IReactorSSL, IReactorArbitrary
from twisted.internet.interfaces import IReactorProcess, IReactorMulticast
from twisted.internet.interfaces import IHalfCloseableDescriptor
from twisted.internet import error
from twisted.internet import tcp, udp
from twisted.python import log, threadable, failure, components, util
from twisted.persisted import styles
from twisted.python.runtime import platformType, platform
from twisted.internet.base import ReactorBase
try:
from twisted.internet import ssl
sslEnabled = True
except ImportError:
sslEnabled = False
try:
from twisted.internet import unix
unixEnabled = True
except ImportError:
unixEnabled = False
processEnabled = False
if platformType == 'posix':
from twisted.internet import fdesc
import process
processEnabled = True
if platform.isWindows():
try:
import win32process
processEnabled = True
except ImportError:
win32process = None
class _Win32Waker(log.Logger, styles.Ephemeral):
"""I am a workaround for the lack of pipes on win32.
I am a pair of connected sockets which can wake up the main loop
from another thread.
"""
disconnected = 0
def __init__(self, reactor):
"""Initialize.
"""
self.reactor = reactor
# Following select_trigger (from asyncore)'s example;
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.IPPROTO_TCP, 1, 1)
server.bind(('127.0.0.1', 0))
server.listen(1)
client.connect(server.getsockname())
reader, clientaddr = server.accept()
client.setblocking(0)
reader.setblocking(0)
self.r = reader
self.w = client
self.fileno = self.r.fileno
def wakeUp(self):
"""Send a byte to my connection.
"""
try:
util.untilConcludes(self.w.send, 'x')
except socket.error, (err, msg):
if err != errno.WSAEWOULDBLOCK:
raise
def doRead(self):
"""Read some data from my connection.
"""
try:
self.r.recv(8192)
except socket.error:
pass
def connectionLost(self, reason):
self.r.close()
self.w.close()
self.reactor.waker = None
class _UnixWaker(log.Logger, styles.Ephemeral):
"""This class provides a simple interface to wake up the event loop.
This is used by threads or signals to wake up the event loop.
"""
disconnected = 0
i = None
o = None
def __init__(self, reactor):
"""Initialize.
"""
self.reactor = reactor
self.i, self.o = os.pipe()
fdesc.setNonBlocking(self.i)
fdesc.setNonBlocking(self.o)
self.fileno = lambda: self.i
def doRead(self):
"""Read some bytes from the pipe.
"""
fdesc.readFromFD(self.fileno(), lambda data: None)
def wakeUp(self):
"""Write one byte to the pipe, and flush it.
"""
if self.o is not None:
try:
util.untilConcludes(os.write, self.o, 'x')
except OSError, e:
if e.errno != errno.EAGAIN:
raise
def connectionLost(self, reason):
"""Close both ends of my pipe.
"""
if not hasattr(self, "o"):
return
for fd in self.i, self.o:
try:
os.close(fd)
except IOError:
pass
del self.i, self.o
self.reactor.waker = None
if platformType == 'posix':
_Waker = _UnixWaker
elif platformType == 'win32':
_Waker = _Win32Waker
class PosixReactorBase(ReactorBase):
"""A basis for reactors that use file descriptors.
"""
implements(IReactorArbitrary, IReactorTCP, IReactorUDP, IReactorMulticast)
def __init__(self):
ReactorBase.__init__(self)
if self.usingThreads or platformType == "posix":
self.installWaker()
def _handleSignals(self):
"""Install the signal handlers for the Twisted event loop."""
try:
import signal
except ImportError:
log.msg("Warning: signal module unavailable -- not installing signal handlers.")
return
if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
# only handle if there isn't already a handler, e.g. for Pdb.
signal.signal(signal.SIGINT, self.sigInt)
signal.signal(signal.SIGTERM, self.sigTerm)
# Catch Ctrl-Break in windows
if hasattr(signal, "SIGBREAK"):
signal.signal(signal.SIGBREAK, self.sigBreak)
if platformType == 'posix':
signal.signal(signal.SIGCHLD, self._handleSigchld)
def _handleSigchld(self, signum, frame, _threadSupport=platform.supportsThreads()):
"""Reap all processes on SIGCHLD.
This gets called on SIGCHLD. We do no processing inside a signal
handler, as the calls we make here could occur between any two
python bytecode instructions. Deferring processing to the next
eventloop round prevents us from violating the state constraints
of arbitrary classes.
"""
if _threadSupport:
self.callFromThread(process.reapAllProcesses)
else:
self.callLater(0, process.reapAllProcesses)
def startRunning(self, installSignalHandlers=1):
# Just in case we're started on a different thread than
# we're made on
threadable.registerAsIOThread()
self.fireSystemEvent('startup')
if installSignalHandlers:
self._handleSignals()
self.running = 1
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
self.mainLoop()
def mainLoop(self):
while self.running:
try:
while self.running:
# Advance simulation time in delayed event
# processors.
self.runUntilCurrent()
t2 = self.timeout()
t = self.running and t2
self.doIteration(t)
except:
log.msg("Unexpected error in main loop.")
log.deferr()
else:
log.msg('Main loop terminated.')
def _disconnectSelectable(self, selectable, why, isRead, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost())
}):
"""Utility function for disconnecting a selectable.
Supports half-close notification, isRead should be boolean indicating
whether error resulted from doRead().
"""
self.removeReader(selectable)
f = faildict.get(why.__class__)
if f:
if (isRead and why.__class__ == error.ConnectionDone
and IHalfCloseableDescriptor.providedBy(selectable)):
selectable.readConnectionLost(f)
else:
self.removeWriter(selectable)
selectable.connectionLost(f)
else:
self.removeWriter(selectable)
selectable.connectionLost(failure.Failure(why))
def installWaker(self):
"""Install a `waker' to allow threads and signals to wake up the IO thread.
We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
the reactor. On Windows we use a pair of sockets.
"""
if not self.waker:
self.waker = _Waker(self)
self.addReader(self.waker)
# IReactorProcess
def spawnProcess(self, processProtocol, executable, args=(),
env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
if platformType == 'posix':
if usePTY:
if childFDs is not None:
raise ValueError("Using childFDs is not supported with usePTY=True.")
return process.PTYProcess(self, executable, args, env, path,
processProtocol, uid, gid, usePTY)
else:
return process.Process(self, executable, args, env, path,
processProtocol, uid, gid, childFDs)
elif platformType == "win32":
if uid is not None or gid is not None:
raise ValueError("The uid and gid parameters are not supported on Windows.")
if usePTY:
raise ValueError("The usePTY parameter is not supported on Windows.")
if childFDs:
raise ValueError("Customizing childFDs is not supported on Windows.")
if win32process:
from twisted.internet._dumbwin32proc import Process
return Process(self, processProtocol, executable, args, env, path)
else:
raise NotImplementedError, "spawnProcess not available since pywin32 is not installed."
else:
raise NotImplementedError, "spawnProcess only available on Windows or POSIX."
# IReactorUDP
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
"""Connects a given L{DatagramProtocol} to the given numeric UDP port.
@returns: object conforming to L{IListeningPort}.
"""
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
return p
def connectUDP(self, remotehost, remoteport, protocol, localport=0,
interface='', maxPacketSize=8192):
"""DEPRECATED.
Connects a L{ConnectedDatagramProtocol} instance to a UDP port.
"""
warnings.warn("use listenUDP and then transport.connect().", DeprecationWarning, stacklevel=2)
p = udp.ConnectedPort((remotehost, remoteport), localport, protocol, interface, maxPacketSize, self)
p.startListening()
return p
# IReactorMulticast
def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192, listenMultiple=False):
"""Connects a given DatagramProtocol to the given numeric UDP port.
EXPERIMENTAL.
@returns: object conforming to IListeningPort.
"""
p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self, listenMultiple)
p.startListening()
return p
# IReactorUNIX
def connectUNIX(self, address, factory, timeout=30, checkPID=0):
"""@see: twisted.internet.interfaces.IReactorUNIX.connectUNIX
"""
assert unixEnabled, "UNIX support is not present"
c = unix.Connector(address, factory, timeout, self, checkPID)
c.connect()
return c
def listenUNIX(self, address, factory, backlog=50, mode=0666, wantPID=0):
"""@see: twisted.internet.interfaces.IReactorUNIX.listenUNIX
"""
assert unixEnabled, "UNIX support is not present"
p = unix.Port(address, factory, backlog, mode, self, wantPID)
p.startListening()
return p
# IReactorUNIXDatagram
def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192, mode=0666):
"""Connects a given L{DatagramProtocol} to the given path.
EXPERIMENTAL.
@returns: object conforming to L{IListeningPort}.
"""
assert unixEnabled, "UNIX support is not present"
p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
p.startListening()
return p
def connectUNIXDatagram(self, address, protocol, maxPacketSize=8192, mode=0666, bindAddress=None):
"""Connects a L{ConnectedDatagramProtocol} instance to a path.
EXPERIMENTAL.
"""
assert unixEnabled, "UNIX support is not present"
p = unix.ConnectedDatagramPort(address, protocol, maxPacketSize, mode, bindAddress, self)
p.startListening()
return p
# IReactorTCP
def listenTCP(self, port, factory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorTCP.listenTCP
"""
p = tcp.Port(port, factory, backlog, interface, self)
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""@see: twisted.internet.interfaces.IReactorTCP.connectTCP
"""
c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
c.connect()
return c
# IReactorSSL (sometimes, not implemented)
def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""@see: twisted.internet.interfaces.IReactorSSL.connectSSL
"""
assert sslEnabled, "SSL support is not present"
c = ssl.Connector(host, port, factory, contextFactory, timeout, bindAddress, self)
c.connect()
return c
def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorSSL.listenSSL
"""
assert sslEnabled, "SSL support is not present"
p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
p.startListening()
return p
# IReactorArbitrary
def listenWith(self, portType, *args, **kw):
kw['reactor'] = self
p = portType(*args, **kw)
p.startListening()
return p
def connectWith(self, connectorType, *args, **kw):
kw['reactor'] = self
c = connectorType(*args, **kw)
c.connect()
return c
def _removeAll(self, readers, writers):
"""Remove all readers and writers, and return list of Selectables.
Meant for calling from subclasses, to implement removeAll, like:
def removeAll(self):
return self._removeAll(reads, writes)
where reads, writes are iterables.
"""
readers = [reader for reader in readers if
reader is not self.waker]
readers_dict = {}
for reader in readers:
readers_dict[reader] = 1
for reader in readers:
self.removeReader(reader)
self.removeWriter(reader)
writers = [writer for writer in writers if
writer not in readers_dict]
for writer in writers:
self.removeWriter(writer)
return readers+writers
if sslEnabled:
classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
classImplements(PosixReactorBase, IReactorProcess)
components.backwardsCompatImplements(PosixReactorBase)
__all__ = ["PosixReactorBase"]
| gpl-2.0 | 6,491,614,993,888,082,000 | 32.472885 | 108 | 0.623809 | false |
adazey/Muzez | libs/youtube_dl/extractor/cracked.py | 170 | 3213 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
str_to_int,
)
class CrackedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cracked\.com/video_(?P<id>\d+)_[\da-z-]+\.html'
_TESTS = [{
'url': 'http://www.cracked.com/video_19070_if-animal-actors-got-e21-true-hollywood-stories.html',
'md5': '89b90b9824e3806ca95072c4d78f13f7',
'info_dict': {
'id': '19070',
'ext': 'mp4',
'title': 'If Animal Actors Got E! True Hollywood Stories',
'timestamp': 1404954000,
'upload_date': '20140710',
}
}, {
# youtube embed
'url': 'http://www.cracked.com/video_19006_4-plot-holes-you-didnt-notice-in-your-favorite-movies.html',
'md5': 'ccd52866b50bde63a6ef3b35016ba8c7',
'info_dict': {
'id': 'EjI00A3rZD0',
'ext': 'mp4',
'title': "4 Plot Holes You Didn't Notice in Your Favorite Movies - The Spit Take",
'description': 'md5:c603708c718b796fe6079e2b3351ffc7',
'upload_date': '20140725',
'uploader_id': 'Cracked',
'uploader': 'Cracked',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
youtube_url = self._search_regex(
r'<iframe[^>]+src="((?:https?:)?//www\.youtube\.com/embed/[^"]+)"',
webpage, 'youtube url', default=None)
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
video_url = self._html_search_regex(
[r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'],
webpage, 'video URL')
title = self._search_regex(
[r'property="?og:title"?\s+content="([^"]+)"', r'class="?title"?>([^<]+)'],
webpage, 'title')
description = self._search_regex(
r'name="?(?:og:)?description"?\s+content="([^"]+)"',
webpage, 'description', default=None)
timestamp = self._html_search_regex(
r'"date"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False)
if timestamp:
timestamp = parse_iso8601(timestamp[:-6])
view_count = str_to_int(self._html_search_regex(
r'<span\s+class="?views"? id="?viewCounts"?>([\d,\.]+) Views</span>',
webpage, 'view count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
r'<span\s+id="?commentCounts"?>([\d,\.]+)</span>',
webpage, 'comment count', fatal=False))
m = re.search(r'_(?P<width>\d+)X(?P<height>\d+)\.mp4$', video_url)
if m:
width = int(m.group('width'))
height = int(m.group('height'))
else:
width = height = None
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'height': height,
'width': width,
}
| gpl-3.0 | 5,546,044,954,490,807,000 | 34.307692 | 111 | 0.516651 | false |
aadebuger/docker-apprtc | src/third_party/oauth2client/anyjson.py | 302 | 1044 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = '[email protected] (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
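
# --- Illustrative usage sketch (added; not part of the original module).
# Whichever backend the import cascade above found is exposed under the single
# name `simplejson`, so callers can rely on the stdlib-compatible API:
#
#   from oauth2client import anyjson
#   payload = anyjson.simplejson.dumps({'access_token': 'abc'})  # hypothetical data
#   parsed = anyjson.simplejson.loads(payload)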
| bsd-3-clause | 2,524,516,898,203,198,000 | 31.625 | 74 | 0.744253 | false |
Fedik/gramps | gramps/gen/filters/rules/repository/_hasreferencecountof.py | 5 | 1737 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Stephane Charette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hasreferencecountbase import HasReferenceCountBase
#-------------------------------------------------------------------------
# "Repositories with a certain reference count"
#-------------------------------------------------------------------------
class HasReferenceCountOf(HasReferenceCountBase):
"""Repositories with a reference count of <count>"""
name = _('Repositories with a reference count of <count>')
description = _("Matches repositories with a certain reference count")
| gpl-2.0 | -3,611,541,252,253,918,000 | 38.477273 | 79 | 0.561313 | false |
gtko/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/brightcove.py | 4 | 10498 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
find_xpath_attr,
fix_xml_ampersands,
compat_urlparse,
compat_str,
compat_urllib_request,
compat_parse_qs,
ExtractorError,
unsmuggle_url,
unescapeHTML,
)
class BrightcoveIE(InfoExtractor):
_VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'file': '2371591881001.mp4',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'file': '1785452137001.flv',
'info_dict': {
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
}
]
@classmethod
def _build_brighcove_url(cls, object_str):
"""
Build a Brightcove url from a xml string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
object_str = fix_xml_ampersands(object_str)
object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return None
params = {}
playerID = find_param('playerID')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# The three fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
if videoPlayer is not None:
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
data = compat_urllib_parse.urlencode(params)
return cls._FEDERATED_URL_TEMPLATE % data
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
if url_m:
url = unescapeHTML(url_m.group(1))
            # Some sites don't add it; we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?</object>''',
webpage)
return [cls._build_brighcove_url(m) for m in matches]
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change the 'videoId' and others field to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query_str, query, referer=referer)
else:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
def _get_video_info(self, video_id, query_str, query, referer=None):
request_url = self._FEDERATED_URL_TEMPLATE % query_str
req = compat_urllib_request.Request(request_url)
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
req.add_header('Referer', referer)
webpage = self._download_webpage(req, video_id)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' not in json_data:
raise ExtractorError('Empty playlist')
playlist_info = json_data['videoList']
videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
return self.playlist_result(videos, playlist_id=playlist_info['id'],
playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
def _extract_video_info(self, video_info):
info = {
'id': compat_str(video_info['id']),
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
}
renditions = video_info.get('renditions')
if renditions:
renditions = sorted(renditions, key=lambda r: r['size'])
info['formats'] = [{
'url': rend['defaultURL'],
'height': rend.get('frameHeight'),
'width': rend.get('frameWidth'),
} for rend in renditions]
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % info['id'])
return info
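
# --- Hedged usage sketch (the caller below is hypothetical, not part of this
# file). A host-page extractor that detects an embedded Brightcove player
# would typically delegate like this, pairing smuggle_url() with the
# unsmuggle_url() call in _real_extract() above so the referring page can be
# replayed as the default Referer header:
#
#   bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
#   if bc_urls:
#       return self.url_result(
#           smuggle_url(bc_urls[0], {'Referer': url}), 'Brightcove')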
| gpl-3.0 | 5,925,626,176,430,747,000 | 40.800797 | 193 | 0.569863 | false |
ilyashrn/ilyashrn.github.io | node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/dotnet.py | 197 | 26727 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
"""
For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
*New in Pygments 0.8.*
"""
name = 'C#'
aliases = ['csharp', 'c#']
filenames = ['*.cs']
mimetypes = ['text/x-csharp'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers,
# see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
'[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|as|async|await|base|break|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
r'internal|is|lock|new|null|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
r'descending|from|group|into|orderby|select|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
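
# Hedged example (illustrative only; assumes the standard pygments API) of
# passing the `unicodelevel` option documented in the CSharpLexer docstring:
#
#   from pygments import highlight
#   from pygments.formatters import NullFormatter
#   lexer = CSharpLexer(unicodelevel='none')  # fastest: ASCII-only identifiers
#   print(highlight('class C { int x; }', lexer, NullFormatter()))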
class NemerleLexer(RegexLexer):
"""
For `Nemerle <http://nemerle.org>`_ source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
*New in Pygments 1.5.*
"""
name = 'Nemerle'
aliases = ['nemerle']
filenames = ['*.n']
mimetypes = ['text/x-nemerle'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = dict(
none = '@?[_a-zA-Z][a-zA-Z0-9_]*',
basic = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
'[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
full = ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
)
tokens = {}
token_variants = True
for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'\$\s*"', String, 'splice-string'),
(r'\$\s*<#', String, 'splice-string2'),
(r'<#', String, 'recursive-string'),
(r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
(r'\]\>', Keyword),
# quasiquotation only
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|and|as|base|catch|def|delegate|'
r'enum|event|extern|false|finally|'
r'fun|implements|interface|internal|'
r'is|macro|match|matches|module|mutable|new|'
r'null|out|override|params|partial|private|'
r'protected|public|ref|sealed|static|'
r'syntax|this|throw|true|try|type|typeof|'
r'virtual|volatile|when|where|with|'
r'assert|assert2|async|break|checked|continue|do|else|'
r'ensures|for|foreach|if|late|lock|new|nolate|'
r'otherwise|regexp|repeat|requires|return|surroundwith|'
r'unchecked|unless|using|while|yield)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
r'short|string|uint|ulong|ushort|void|array|list)\b\??',
Keyword.Type),
(r'(:>?)\s*(' + cs_ident + r'\??)',
bygroups(Punctuation, Keyword.Type)),
(r'(class|struct|variant|module)(\s+)',
bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text),
'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
],
'splice-string': [
(r'[^"$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'\\"', String),
(r'"', String, '#pop')
],
'splice-string2': [
(r'[^#<>$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'recursive-string': [
(r'[^#<>]', String),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'splice-string-content': [
(r'if|match', Keyword),
(r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
(cs_ident, Name),
(r'\d+', Number),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
"""
For `Boo <http://boo.codehaus.org/>`_ source code.
"""
name = 'Boo'
aliases = ['boo']
filenames = ['*.boo']
mimetypes = ['text/x-boo']
tokens = {
'root': [
(r'\s+', Text),
(r'(#|//).*$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment'),
(r'[]{}:(),.;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'/(\\\\|\\/|[^/\s])/', String.Regex),
(r'@/(\\\\|\\/|[^/])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
r'partial|private|protected|public|return|set|static|'
r'struct|transient|virtual|yield|super|and|break|cast|'
r'continue|elif|else|ensure|except|for|given|goto|if|in|'
r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
r'while|from|as)\b', Keyword),
(r'def(?=\s+\(.*?\))', Keyword),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
r'assert|checked|enumerate|filter|getter|len|lock|map|'
r'matrix|max|min|normalArrayIndexing|print|property|range|'
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
(r'"(\\\\|\\"|[^"]*?)"', String.Double),
(r"'(\\\\|\\'|[^']*?)'", String.Single),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9\.]*(ms?|d|h|s)', Number),
(r'0\d+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer),
],
'comment': [
('/[*]', Comment.Multiline, '#push'),
('[*]/', Comment.Multiline, '#pop'),
('[^/*]', Comment.Multiline),
('[*/]', Comment.Multiline)
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'namespace': [
('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
]
}
class VbNetLexer(RegexLexer):
"""
For
`Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
source code.
"""
name = 'VB.net'
aliases = ['vb.net', 'vbnet']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Text),
(r'\n', Text),
(r'rem\b.*?\n', Comment),
(r"'.*?\n", Comment),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[\(\){}!#,.:]', Punctuation),
(r'Option\s+(Strict|Explicit|Compare)\s+'
r'(On|Off|Binary|Text)', Keyword.Declaration),
(r'(?<!\.)(AddHandler|Alias|'
r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
r'Implements|Inherits|Interface|'
r'Let|Lib|Loop|Me|MustInherit|'
r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
r'Operator|Option|Optional|Overloads|Overridable|'
r'Overrides|ParamArray|Partial|Private|Protected|'
r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
r'Return|Select|Set|Shadows|Shared|Single|'
r'Static|Step|Stop|SyncLock|Then|'
r'Throw|To|True|Try|TryCast|Wend|'
r'Using|When|While|Widening|With|WithEvents|'
r'WriteOnly)\b', Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Text), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Text), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>]',
Operator),
('"', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'_\n', Text), # Line continuation
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'),
(r'', Text, '#pop'), # any other syntax
],
'funcname': [
(r'[a-z_][a-z0-9_]*', Name.Function, '#pop'),
],
'classname': [
(r'[a-z_][a-z0-9_]*', Name.Class, '#pop'),
],
'namespace': [
(r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'),
],
'end': [
(r'\s+', Text),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
(r'', Text, '#pop'),
]
}
class GenericAspxLexer(RegexLexer):
"""
Lexer for ASP.NET pages.
"""
name = 'aspx-gen'
filenames = []
mimetypes = []
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
Other,
using(XmlLexer))),
(r'(.+?)(?=<)', using(XmlLexer)),
(r'.+', using(XmlLexer)),
],
}
#TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
"""
    Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(CSharpAspxLexer, self).__init__(CSharpLexer,GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
class VbNetAspxLexer(DelegatingLexer):
"""
    Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(VbNetAspxLexer, self).__init__(VbNetLexer,GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
For the F# language (version 3.0).
*New in Pygments 1.5.*
"""
name = 'FSharp'
aliases = ['fsharp']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
'->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
'<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
'_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name),
(r'///.*?\n', String.Doc),
(r'//.*?\n', Comment.Single),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([a-zA-Z0-9_.]+)',
bygroups(Keyword, Text, Name.Namespace)),
(r'\b(let!?)(\s+)([a-zA-Z0-9_]+)',
bygroups(Keyword, Text, Name.Variable)),
(r'\b(type)(\s+)([a-zA-Z0-9_]+)',
bygroups(Keyword, Text, Name.Class)),
(r'\b(member|override)(\s+)([a-zA-Z0-9_]+)(\.)([a-zA-Z0-9_]+)',
bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
Comment.Preproc),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Binary),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name, '#pop'),
(r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
| mit | 47,234,803,839,739,560 | 38.831595 | 84 | 0.447375 | false |
ZubairLK/CI20_linux | tools/perf/scripts/python/sched-migration.py | 1910 | 11965 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
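
# Hedged usage note (illustration only; the exact event list may vary by
# kernel version): perf's Python scripting engine imports this file, calls
# trace_begin(), dispatches one handler per recorded event, then calls
# trace_end(), which opens the wxPython window built in SchedGui. A typical
# session looks like:
#
#   perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#       -e sched:sched_wakeup -e sched:sched_wakeup_new -- sleep 5
#   perf script -s tools/perf/scripts/python/sched-migration.py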
| gpl-2.0 | 4,787,123,094,976,010,000 | 25.01087 | 88 | 0.679733 | false |
flumotion-mirror/flumotion | flumotion/test/test_flavors.py | 3 | 15234 | # -*- Mode: Python; test-case-name: flumotion.test.test_flavors -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from twisted.internet import reactor, defer
from twisted.spread import pb
from zope.interface import implements
from flumotion.common import testsuite
from flumotion.twisted import flavors
class TestStateCacheable(flavors.StateCacheable):
pass
class TestStateRemoteCache(flavors.StateRemoteCache):
pass
pb.setUnjellyableForClass(TestStateCacheable, TestStateRemoteCache)
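# Note (added for clarity): the registration above makes Perspective Broker
# deserialize any TestStateCacheable sent over the wire as a
# TestStateRemoteCache on the receiving side; the state objects the client
# obtains via callRemote('getState') in the tests below are of that type.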
class FakeObject:
pass
class FakeListener:
# listener interface
implements(flavors.IStateListener)
def stateSet(self, state, key, value):
pass
def stateAppend(self, state, key, value):
pass
def stateRemove(self, state, key, value):
pass
class TestRoot(testsuite.TestManagerRoot):
def remote_getState(self):
self.state = TestStateCacheable()
self.state.addKey('name', 'lois')
self.state.addListKey('children')
self.state.addDictKey('nationalities')
return self.state
def remote_setStateName(self, name):
return self.state.set('name', name)
def remote_haggis(self):
return self.state.setitem('nationalities',
'mary queen of scots', 'scotland')
def remote_emigrate(self):
return self.state.setitem('nationalities',
'mary queen of scots', 'norway')
def remote_coup(self):
return self.state.delitem('nationalities',
'mary queen of scots')
def remote_bearChild(self, name):
return self.state.append('children', name)
def remote_haveAdopted(self, name):
return self.state.remove('children', name)
class StateTest(testsuite.TestCase):
def setUp(self):
self.changes = []
self.runServer()
def tearDown(self):
return self.stopServer()
def runClient(self):
f = pb.PBClientFactory()
self.cport = reactor.connectTCP("127.0.0.1", self.port, f)
d = f.getRootObject()
d.addCallback(self.clientConnected)
return d
def clientConnected(self, perspective):
self.perspective = perspective
self._dDisconnect = defer.Deferred()
perspective.notifyOnDisconnect(
lambda r: self._dDisconnect.callback(None))
def stopClient(self):
self.cport.disconnect()
return self._dDisconnect
def runServer(self):
factory = pb.PBServerFactory(TestRoot())
factory.unsafeTracebacks = 1
self.sport = reactor.listenTCP(0, factory, interface="127.0.0.1")
self.port = self.sport.getHost().port
def stopServer(self):
d = self.sport.stopListening()
return d
class TestStateSet(StateTest):
def testStateSet(self):
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def set_state(state):
d.state = state
self.failUnless(state)
self.failUnlessEqual(state.get('name'), 'lois')
self.assertRaises(KeyError, state.get, 'dad')
return self.perspective.callRemote('setStateName', 'clark')
def check_name(_):
self.failUnlessEqual(d.state.get('name'), 'clark')
d.addCallback(set_state)
d.addCallback(check_name)
d.addCallback(lambda _: self.stopClient())
return d
def testStateAppendRemove(self):
# start everything
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def set_state_and_bear_child(state):
d.state = state
self.failUnless(state)
self.failUnlessEqual(state.get('children'), [])
return self.perspective.callRemote('bearChild', 'robin')
def check_first_kid_and_bear_again(_):
self.failUnlessEqual(d.state.get('children'), ['robin'])
return self.perspective.callRemote('bearChild', 'robin')
def check_second_kid_and_give_away(_):
self.failUnlessEqual(d.state.get('children'), ['robin', 'robin'])
return self.perspective.callRemote('haveAdopted', 'robin')
def check_after_adopt_and_bear_again(_):
self.failUnlessEqual(d.state.get('children'), ['robin'])
return self.perspective.callRemote('bearChild', 'batman')
def check_third_kid_and_stop(_):
self.failUnlessEqual(d.state.get('children'), ['robin', 'batman'])
return self.stopClient()
d.addCallback(set_state_and_bear_child)
d.addCallback(check_first_kid_and_bear_again)
d.addCallback(check_second_kid_and_give_away)
d.addCallback(check_after_adopt_and_bear_again)
d.addCallback(check_third_kid_and_stop)
return d
def testStateWrongListener(self):
# start everything
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def got_state_and_stop(state):
self.assertRaises(Exception, state.addListener, FakeObject())
self.assertRaises(KeyError, state.removeListener, FakeObject())
return self.stopClient()
d.addCallback(got_state_and_stop)
return d
def listen(self, state):
def event(type):
return lambda *x: self.changes.append((type, ) + x)
state.addListener(self, set_=event('set'), append=event('append'),
remove=event('remove'), setitem=event('setitem'),
delitem=event('delitem'))
# listener tests
def testStateSetListener(self):
# start everything and get the state
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
# ask server to set the name
def add_listener_and_set_name(state):
d.state = state # monkeypatch
self.listen(state)
return self.perspective.callRemote('setStateName', 'robin')
def check_results(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('set', d.state, 'name', 'robin'))
return self.stopClient()
d.addCallback(add_listener_and_set_name)
d.addCallback(check_results)
return d
def testStateAppendRemoveListener(self):
# start everything and get the state
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def add_listener_and_bear_child(state):
d.state = state # monkeypatch
self.listen(state)
return self.perspective.callRemote('bearChild', 'robin')
def check_append_results_and_adopt_kid(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('append', d.state, 'children', 'robin'))
return self.perspective.callRemote('haveAdopted', 'robin')
def check_remove_results_and_bear_child(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('remove', d.state, 'children', 'robin'))
return self.perspective.callRemote('bearChild', 'batman')
def check_append_results_and_stop(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('append', d.state, 'children', 'batman'))
return self.stopClient()
d.addCallback(add_listener_and_bear_child)
d.addCallback(check_append_results_and_adopt_kid)
d.addCallback(check_remove_results_and_bear_child)
d.addCallback(check_append_results_and_stop)
return d
def testStateDictListener(self):
# start everything and get the state
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def add_listener_and_haggis(state):
d.state = state # monkeypatch
self.listen(state)
return self.perspective.callRemote('haggis')
def check_set_results_and_emigrate(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('setitem', d.state, 'nationalities',
'mary queen of scots', 'scotland'))
return self.perspective.callRemote('emigrate')
def check_set_results_and_coup_de_etat(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('setitem', d.state, 'nationalities',
'mary queen of scots', 'norway'))
return self.perspective.callRemote('coup')
def check_remove_results_and_stop(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('delitem', d.state,
'nationalities',
'mary queen of scots', 'norway'))
return self.stopClient()
d.addCallback(add_listener_and_haggis)
d.addCallback(check_set_results_and_emigrate)
d.addCallback(check_set_results_and_coup_de_etat)
d.addCallback(check_remove_results_and_stop)
return d
class TestFullListener(StateTest):
def testStateSetListener(self):
# start everything and get the state
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def customStateSet(state, key, value):
self.changes.append(('set', state, key, value))
# ask server to set the name
def add_listener_and_set_name(state):
d.state = state # monkeypatch
state.addListener(self, set_=customStateSet)
return self.perspective.callRemote('setStateName', 'robin')
def check_results(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('set', d.state, 'name', 'robin'))
return self.stopClient()
d.addCallback(add_listener_and_set_name)
d.addCallback(check_results)
return d
def testStateAppendRemoveListener(self):
# start everything and get the state
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
def customStateAppend(state, key, value):
self.changes.append(('append', state, key, value))
def customStateRemove(state, key, value):
self.changes.append(('remove', state, key, value))
def add_listener_and_bear_child(state):
d.state = state # monkeypatch
# here test the positional-arguments code
state.addListener(self, append=customStateAppend,
remove=customStateRemove)
return self.perspective.callRemote('bearChild', 'robin')
def check_append_results_and_adopt_kid(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('append', d.state, 'children', 'robin'))
return self.perspective.callRemote('haveAdopted', 'robin')
def check_remove_results_and_bear_child(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('remove', d.state, 'children', 'robin'))
return self.perspective.callRemote('bearChild', 'batman')
def check_append_results_and_stop(_):
c = self.changes.pop()
self.failUnlessEqual(c, ('append', d.state, 'children', 'batman'))
return self.stopClient()
d.addCallback(add_listener_and_bear_child)
d.addCallback(check_append_results_and_adopt_kid)
d.addCallback(check_remove_results_and_bear_child)
d.addCallback(check_append_results_and_stop)
return d
def testInvalidate(self):
calls = []
def check_invalidation(state):
def invalidate(obj):
calls.append(('invalidate', obj))
def unused(*args):
assert False, 'should not be reached'
self.assertEquals(calls, [])
state.addListener(1, invalidate=invalidate)
state.invalidate()
# basic invalidation
self.assertEquals(calls, [('invalidate', state)])
# connecting after invalidation
state.addListener(2, invalidate=invalidate)
self.assertEquals(calls, [('invalidate', state),
('invalidate', state)])
state.addListener(3, set_=unused)
self.assertEquals(calls, [('invalidate', state),
('invalidate', state)])
return self.stopClient()
d = self.runClient()
d.addCallback(lambda _: self.perspective.callRemote('getState'))
d.addCallback(check_invalidation)
return d
class TestState(testsuite.TestCase):
def testStateAddKey(self):
c = flavors.StateCacheable()
c.addListKey('list')
self.failUnless(c.hasKey('list'))
self.failIf(c.hasKey('randomkey'))
l = c.get('list')
self.failUnlessEqual(len(l), 0)
c.append('list', 'item')
l = c.get('list')
self.failUnlessEqual(len(l), 1)
self.failUnlessEqual(l[0], 'item')
c.addListKey('two')
l = c.get('two')
self.failUnlessEqual(len(l), 0)
c.append('two', 'B')
l = c.get('two')
self.assertEqual(len(l), 1)
self.assertEqual(l[0], 'B')
def testStateGet(self):
c = flavors.StateCacheable()
c.addKey('akey')
c.set('akey', 'avalue')
self.assertEquals(c.get('akey'), 'avalue')
self.assertRaises(KeyError, c.get, 'randomkey')
def testStateAppendRemove(self):
c = flavors.StateCacheable()
c.addListKey('alist')
c.append('alist', 'avalue')
self.assertEquals(c.get('alist'), ['avalue', ])
self.assertRaises(KeyError, c.append, 'randomlistkey', 'value')
c.remove('alist', 'avalue')
self.assertEquals(c.get('alist'), [])
self.assertRaises(KeyError, c.remove, 'randomlistkey', 'value')
self.assertRaises(ValueError, c.remove, 'alist', 'value')
def testStateDictAppendRemove(self):
c = flavors.StateCacheable()
c.addDictKey('adict')
c.setitem('adict', 'akey', 'avalue')
self.assertEquals(c.get('adict'), {'akey': 'avalue'})
c.setitem('adict', 'akey', 'bvalue')
self.assertEquals(c.get('adict'), {'akey': 'bvalue'})
c.delitem('adict', 'akey')
self.assertEquals(c.get('adict'), {})
self.assertRaises(KeyError, c.delitem, 'randomdictkey', 'value')
self.assertRaises(KeyError, c.delitem, 'adict', 'akey')
| lgpl-2.1 | -7,720,369,496,067,587,000 | 33.310811 | 78 | 0.604897 | false |
narurien/ganeti-ceph | lib/__init__.py | 8 | 1196 | #
#
# Copyright (C) 2006, 2007, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# empty file for package definition
"""Ganeti python modules"""
try:
from ganeti import ganeti
except ImportError:
pass
else:
raise Exception("A module named \"ganeti.ganeti\" was successfully imported"
" and should be removed as it can lead to importing the"
" wrong module(s) in other parts of the code, consequently"
" leading to failures which are difficult to debug")
| gpl-2.0 | 7,883,533,817,714,225,000 | 34.176471 | 78 | 0.720736 | false |
herilalaina/scikit-learn | sklearn/learning_curve.py | 27 | 15421 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0,
error_score='raise'):
"""Learning curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.learning_curve` instead.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True,
error_score=error_score)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
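# Illustrative usage of learning_curve (a sketch, not part of the original
# module; the estimator and cv value below are arbitrary choices):
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     from sklearn.learning_curve import learning_curve
#
#     iris = load_iris()
#     sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel='linear'), iris.data, iris.target,
#         train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#     # train_scores and test_scores have shape (n_ticks, n_cv_folds) and are
#     # usually averaged over axis=1 before plotting.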
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.validation_curve` instead.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
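# Illustrative usage of validation_curve (a sketch; assumes a feature matrix X
# and targets y are already defined, and the parameter range is arbitrary):
#
#     from sklearn.svm import SVC
#     train_scores, test_scores = validation_curve(
#         SVC(), X, y, param_name="gamma",
#         param_range=np.logspace(-6, -1, 5), cv=5)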
| bsd-3-clause | 8,882,318,032,998,034,000 | 41.836111 | 79 | 0.640814 | false |
mateusz-holenko/gf | utils.py | 1 | 1321 | #!/usr/bin/python3
import codecs
##########################################################################################################
# #
# Based on: #
# http://stackoverflow.com/questions/436220/python-is-there-a-way-to-determine-the-encoding-of-text-file #
# #
##########################################################################################################
encodings = ['utf-8', 'windows-1250']
def try_read_file(file, action, fail_action):
global encodings
for e in encodings:
try:
with codecs.open(file, 'r', encoding=e) as f:
for line in f.readlines():
action(line)
return True
except UnicodeDecodeError:
fail_action(e)
else:
return False
def read_file(file):
global encodings
for e in encodings:
try:
with codecs.open(file, 'r', encoding=e) as f:
return f.readlines()
except UnicodeDecodeError:
pass
return None
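# Example sketch (file name and callbacks are illustrative): collect the lines
# of a file whose encoding is unknown and report each decoding failure.
#
#     lines = []
#     ok = try_read_file('input.txt', lines.append,
#                        lambda enc: print('decoding failed with', enc))
#     content = read_file('input.txt')  # list of lines, or None if undecodable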
| mit | -4,922,440,124,433,611,000 | 39.030303 | 106 | 0.348221 | false |
f-enye/Lister | Lister/forms.py | 1 | 1160 | from flask.ext.wtf import Form
from wtforms import TextField, PasswordField, BooleanField, IntegerField
from wtforms.validators import Required, Email, EqualTo
class LoginForm(Form):
user_name = TextField('user_name', validators = [Required(), Email(message=u'Invalid email address')])
#Not an encrypted password
password = PasswordField('password', validators = [Required()])
class SignupForm(Form):
user_name = TextField('user_name', validators = [Required(), Email(message=u'Invalid email address')])
password = PasswordField('password', validators = [Required(), EqualTo('confirm_password', message='Passwords must match')])
confirm_password = PasswordField('confirm_password', validators = [Required()])
class ListForm(Form):
list_name = TextField('list_name', validators = [Required()])
class ItemForm(Form):
item_name = TextField('item_name', validators = [Required()])
check = BooleanField('check')
class SearchForm(Form):
user_name = TextField('user_name', validators = [Required()])
class ShareListForm(Form):
user_name = TextField('user_name', validators = [Required()])
list_id = IntegerField('list_id', validators = [Required()]) | mit | 4,811,601,060,408,015,000 | 42 | 125 | 0.738793 | false |
akosthekiss/fuzzinator | tests/resources/mock_tool.py | 2 | 1955 | #!/usr/bin/env python3
# Copyright (c) 2016 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import argparse
import os
import signal
import sys
def main():
parser = argparse.ArgumentParser(description='Mock tool with control over its output & termination.')
parser.add_argument('--print-args', action='store_true', default=False,
help='print the (non-option) arguments')
parser.add_argument('--print-env', metavar='VAR', type=str, default=None,
help='print an environment variable')
parser.add_argument('--echo-stdin', action='store_true', default=False,
help='echo the standard input')
parser.add_argument('--to-stderr', action='store_true', default=False,
help='write to standard error instead of standard output')
parser.add_argument('--crash', action='store_true', default=False,
help='crash process after output')
parser.add_argument('--exit-code', metavar='N', type=int, default=0,
help='terminate process with given exit code (default: %(default)s)')
parser.add_argument('args', nargs='*',
help='arbitrary command line arguments')
args = parser.parse_args()
out = sys.stderr if args.to_stderr else sys.stdout
if args.print_args:
for arg in args.args:
print(arg, file=out, flush=True)
if args.print_env is not None:
print(os.getenv(args.print_env, ''), file=out, flush=True)
if args.echo_stdin:
for line in sys.stdin:
print(line, file=out, end='', flush=True)
if args.crash:
os.kill(os.getpid(), signal.SIGSEGV)
sys.exit(args.exit_code)
if __name__ == '__main__':
main()
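# Example invocations (illustrative):
#
#   python3 mock_tool.py --print-args foo bar
#   python3 mock_tool.py --echo-stdin --to-stderr < some_input.txt
#   python3 mock_tool.py --crash            # terminates itself with SIGSEGV
#   python3 mock_tool.py --exit-code 1      # exits with status 1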
| bsd-3-clause | 7,403,249,259,583,787,000 | 35.203704 | 105 | 0.623529 | false |
oesteban/tract_querier | tract_querier/tractography/trackvis.py | 1 | 3309 | from itertools import izip
from warnings import warn
import numpy
from tractography import Tractography
from nibabel import trackvis
def tractography_to_trackvis_file(filename, tractography, affine=None, image_dimensions=None):
trk_header = trackvis.empty_header()
if affine is not None:
pass
elif hasattr(tractography, 'affine'):
affine = tractography.affine
else:
raise ValueError("Affine transform has to be provided")
trackvis.aff_to_hdr(affine, trk_header, True, True)
if image_dimensions is not None:
trk_header['dim'] = image_dimensions
elif hasattr(tractography, 'image_dimensions'):
        # fall back to the dimensions stored on the tractography object
        trk_header['dim'] = tractography.image_dimensions
else:
raise ValueError("Image dimensions needed to save a trackvis file")
orig_data = tractography.tracts_data()
data = {}
for k, v in orig_data.iteritems():
if not isinstance(v[0], numpy.ndarray):
continue
if (v[0].ndim > 1 and any(d > 1 for d in v[0].shape[1:])):
warn(
"Scalar data %s ignored as trackvis "
"format does not handle multivalued data" % k
)
else:
data[k] = v
#data_new = {}
# for k, v in data.iteritems():
# if (v[0].ndim > 1 and v[0].shape[1] > 1):
# for i in xrange(v[0].shape[1]):
# data_new['%s_%02d' % (k, i)] = [
# v_[:, i] for v_ in v
# ]
# else:
# data_new[k] = v
trk_header['n_count'] = len(tractography.tracts())
trk_header['n_properties'] = 0
trk_header['n_scalars'] = len(data)
if len(data) > 10:
raise ValueError('At most 10 scalars permitted per point')
trk_header['scalar_name'][:len(data)] = numpy.array(
[n[:20] for n in data],
dtype='|S20'
)
trk_tracts = []
for i, sl in enumerate(tractography.tracts()):
scalars = None
if len(data) > 0:
scalars = numpy.vstack([
data[k][i].squeeze()
for k in trk_header['scalar_name'][:len(data)]
]).T
trk_tracts.append((sl, scalars, None))
trackvis.write(filename, trk_tracts, trk_header, points_space='rasmm')
def tractography_from_trackvis_file(filename):
tracts_and_data, header = trackvis.read(filename, points_space='rasmm')
tracts, scalars, properties = izip(*tracts_and_data)
scalar_names = [n for n in header['scalar_name'] if len(n) > 0]
#scalar_names_unique = []
#scalar_names_subcomp = {}
# for sn in scalar_names:
# if re.match('.*_[0-9]{2}', sn):
# prefix = sn[:sn.rfind('_')]
# if prefix not in scalar_names_unique:
# scalar_names_unique.append(prefix)
# scalar_names_subcomp[prefix] = int(sn[-2:])
# scalar_names_subcomp[prefix] = max(sn[-2:], scalar_names_subcomp[prefix])
# else:
# scalar_names_unique.append(sn)
tracts_data = {}
for i, sn in enumerate(scalar_names):
tracts_data[sn] = [scalar[:, i][:, None] for scalar in scalars]
affine = header['vox_to_ras']
image_dims = header['dim']
tr = Tractography(
tracts, tracts_data,
affine=affine, image_dims=image_dims
)
return tr
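# Round-trip sketch (file names, affine and image dimensions are placeholders;
# a real call needs the affine and dimensions of the source image):
#
#     tr = tractography_from_trackvis_file('bundle.trk')
#     tractography_to_trackvis_file('copy.trk', tr,
#                                   affine=numpy.eye(4),
#                                   image_dimensions=(256, 256, 176))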
| bsd-3-clause | -6,762,158,326,531,147,000 | 29.638889 | 94 | 0.570565 | false |
gangadharkadam/verveerp | erpnext/accounts/report/profit_and_loss_statement/profit_and_loss_statement.py | 6 | 1220 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from erpnext.accounts.report.financial_statements import (get_period_list, get_columns, get_data)
def execute(filters=None):
period_list = get_period_list(filters.fiscal_year, filters.periodicity)
income = get_data(filters.company, "Income", "Credit", period_list, ignore_closing_entries=True)
expense = get_data(filters.company, "Expense", "Debit", period_list, ignore_closing_entries=True)
net_profit_loss = get_net_profit_loss(income, expense, period_list)
data = []
data.extend(income or [])
data.extend(expense or [])
if net_profit_loss:
data.append(net_profit_loss)
columns = get_columns(period_list)
return columns, data
def get_net_profit_loss(income, expense, period_list):
if income and expense:
net_profit_loss = {
"account_name": _("Net Profit / Loss"),
"account": None,
"warn_if_negative": True
}
for period in period_list:
net_profit_loss[period.key] = flt(income[-2][period.key] - expense[-2][period.key], 3)
return net_profit_loss
| agpl-3.0 | -326,651,965,682,978,940 | 31.105263 | 98 | 0.727049 | false |
crdx/face | module.py | 1 | 2271 | import traceback
import imp
class Module():
def __init__(self, module_filename):
# the filename e.g. mod_list.py
self.filename = module_filename
# the filename without extension
self.name = module_filename[:-3]
# start marked as unloaded, so this will hold true if anything goes
# terribly wrong with loading the module later on
self.loaded = False
self.load()
def load(self):
try:
self.module = __import__(self.name)
print("[+] Loaded {0}".format(self.name))
self.loaded = True
except Exception:
print("[!] Failed to load {0}:".format(self.name))
traceback.print_exc()
def reload(self):
if not self.loaded:
self.load()
else:
try:
imp.reload(self.module)
print("[/] Reloaded {0}".format(self.name))
except Exception:
print("[!] Failed to reload {0}:".format(self.name))
traceback.print_exc()
self.loaded = False
# Return "True" to halt further processing of modules.
def handle_event(self, event_handled, p):
# skip if we aren't loaded
if not self.loaded:
return event_handled
# all modules always have their global event handler called
self.call_method("global_handler", event_handled, p)
# if this event hasn't been handled yet, see if this module will handle
# it
if not event_handled:
return self.call_method("on_{0}".format(p.event.type), event_handled, p)
else:
return event_handled
# Return "True" to halt further processing of modules.
def call_method(self, method_name, event_handled, p):
if method_name in dir(self.module):
method = getattr(self.module, method_name)
try:
# pass the module's return value back
return method(p)
except Exception:
print("Failed to call method {0} in {1}:".format(method_name, self.name))
traceback.print_exc()
# we didn't call this module, so maintain current state
return event_handled
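# A handler module wrapped by this class is expected to define plain functions
# such as the following (names are illustrative of the convention used above):
#
#     def global_handler(p):
#         ...          # always called for every event
#
#     def on_pubmsg(p):
#         return True  # returning True halts processing by later modules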
| mit | -4,608,306,803,859,073,000 | 32.895522 | 89 | 0.563628 | false |
kuangtu/OpenMAMA | mama/c_cpp/src/regression/c_cpp/scripts/regression.py | 11 | 1631 | #!/usr/bin/python
import unittest
import optparse
from common import check_env
from regressionc import TestOpenmamac
from regressioncpp import TestOpenmamacpp
import globals
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("--tport", dest="transport",nargs=2,help='Name of pub and sub transport in mama.properties')
parser.add_option("--m",dest="middleware",default="avis",choices=['avis','lbm','wmw'],help='Select Middleware: avis,lbm or wmw')
parser.add_option("--test",dest="test",default="all",choices=['c','cpp','all'],help='Select test: c, cpp or all')
parser.add_option("--timeout",dest="timeout",type="int",help="Timeout for wait4text")
parser.add_option("--q",action="store_false",dest="verbose",help="Suppress verbose output")
(option,argument) = parser.parse_args()
check_env()
globals.MIDDLEWARE = option.middleware
if option.verbose is not None:
globals.VERBOSE = option.verbose
if option.transport is not None:
globals.TRANSPORTPUB = option.transport[0]
globals.TRANSPORTSUB = option.transport[1]
test = option.test
if option.timeout is not None:
globals.TIMEOUT = option.timeout
c_suite = unittest.TestLoader().loadTestsFromTestCase(TestOpenmamac)
cpp_suite = unittest.TestLoader().loadTestsFromTestCase(TestOpenmamacpp)
all_suite = unittest.TestSuite([c_suite,cpp_suite])
if test=='c':
unittest.TextTestRunner().run(c_suite)
elif test=='cpp':
unittest.TextTestRunner().run(cpp_suite)
elif test=='all':
unittest.TextTestRunner().run(all_suite)
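# Example invocation (sketch; transport names depend on mama.properties):
#   python regression.py --m avis --test all --tport pub_tport sub_tport --timeout 30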
| lgpl-2.1 | 8,499,315,104,238,466,000 | 38.780488 | 132 | 0.692826 | false |
sio2project/sioworkers | sio/workers/util.py | 1 | 8159 | from __future__ import absolute_import
from contextlib import contextmanager
import pkg_resources
import time
import logging
import stat
import os
import json
import tempfile
import shutil
import threading
import six
logger = logging.getLogger(__name__)
def first_entry_point(group, name=None):
for ep in pkg_resources.iter_entry_points(group, name):
try:
return ep.load()
except ImportError as e:
logger.warning(
'ImportError: %s: %s'
% (
ep,
e,
)
)
pass
raise RuntimeError("Module providing '%s:%s' not found" % (group, name or ''))
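# Usage sketch (the entry-point group and name are hypothetical):
#
#     run_job = first_entry_point('sio.jobs', 'compile')
#     # returns the first entry point in that group that imports cleanly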
class PerfTimer(object):
def __init__(self):
self.start_time = time.time()
@property
def elapsed(self):
return time.time() - self.start_time
def s2ms(seconds):
"""Converts ``seconds`` to miliseconds
>>> s2ms(1.95)
1950
"""
return int(1000 * seconds)
def ms2s(miliseconds):
"""Converts ``miliseconds`` to seconds and returns float.
>>> '%.2f' % ms2s(1190)
'1.19'
"""
return miliseconds / 1000.0
def ceil_ms2s(miliseconds):
"""Returns first integer count of seconds not less that ``miliseconds``.
>>> ceil_ms2s(1000)
1
>>> ceil_ms2s(1001)
2
"""
return int((miliseconds + 999) / 1000)
class Writable(object):
"""Context manager making file writable.
It's not safe to use it concurrently on the same file, but nesting is ok.
"""
def __init__(self, fname):
self.orig_mode = os.stat(fname).st_mode
self.change_needed = ~(self.orig_mode & stat.S_IWUSR)
self.fname = fname
def __enter__(self):
if self.change_needed:
os.chmod(self.fname, self.orig_mode | stat.S_IWUSR)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.change_needed:
os.chmod(self.fname, self.orig_mode)
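# Usage sketch for Writable (path is a placeholder): the owner write bit is
# added on entry and the original mode is restored on exit.
#
#     with Writable('/tmp/read_only_file'):
#         with open('/tmp/read_only_file', 'a') as f:
#             f.write('appended while temporarily writable\n')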
def rmtree(path):
def remove_readonly(fn, path, excinfo):
with Writable(os.path.normpath(os.path.dirname(path))):
fn(path)
shutil.rmtree(path, onerror=remove_readonly)
threadlocal_dir = threading.local()
def tempcwd(path=None):
# Someone might call tempcwd twice, i.e. tempcwd(tempcwd('something'))
# Do nothing in this case.
if path is not None and os.path.isabs(path):
return path
d = threadlocal_dir.tmpdir
if path:
return os.path.join(d, path)
else:
return d
class TemporaryCwd(object):
"""Helper class for changing the working directory."""
def __init__(self, inner_directory=None):
self.extra = inner_directory
self.path = None
self.old_path = None
def __enter__(self):
self.path = tempfile.mkdtemp(prefix='sioworkers_')
logger.info('Using temporary directory %s', self.path)
p = self.path
if self.extra:
p = os.path.join(self.path, self.extra)
os.mkdir(p)
self.old_path = getattr(threadlocal_dir, 'tmpdir', None)
threadlocal_dir.tmpdir = p
return self
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.path)
threadlocal_dir.tmpdir = self.old_path
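# Usage sketch: TemporaryCwd sets up a per-thread scratch directory which
# tempcwd() then resolves paths against (file name is illustrative).
#
#     with TemporaryCwd('inner'):
#         with open(tempcwd('scratch.txt'), 'w') as f:
#             f.write('temporary data')
#     # the whole tree is deleted when the block exits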
def path_join_abs(base, subpath):
"""Joins two absolute paths making ``subpath`` relative to ``base``.
>>> import os.path
>>> os.path.join('/usr', '/bin/sh')
'/bin/sh'
>>> path_join_abs('/usr', '/bin/sh')
'/usr/bin/sh'
"""
return os.path.join(base, subpath.strip(os.sep))
def replace_invalid_UTF(a_string):
"""Replaces invalid characters in a string.
In python 2 strings are also bytestrings.
In python 3 it returns a string.
"""
if six.PY2:
return a_string.decode('utf-8', 'replace').encode('utf-8')
else:
if not isinstance(a_string, six.string_types):
return a_string.decode('utf-8', 'replace')
else:
return a_string.encode('utf-8', 'replace').decode()
class CompatibleJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return obj.decode("ASCII")
return super(CompatibleJSONEncoder, self).default(obj)
def json_dumps(obj, **kwargs):
"""Python 3 and 2 compatible json.dump."""
kwargs.setdefault('cls', CompatibleJSONEncoder)
return json.dumps(obj, **kwargs)
def decode_fields(fields):
def _decode_decorator(func):
def _wrapper(*args, **kwargs):
result_dict = func(*args, **kwargs)
for field in fields:
if not isinstance(result_dict[field], six.string_types):
result_dict[field] = result_dict[field].decode()
return result_dict
return _wrapper
return _decode_decorator
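# Usage sketch (function and field names are made up): decode selected byte
# fields of a returned dict into text.
#
#     @decode_fields(['stdout'])
#     def run_step():
#         return {'stdout': b'ok', 'rc': 0}
#
#     run_step()['stdout']  # -> 'ok'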
def null_ctx_manager():
def dummy():
yield
return contextmanager(dummy)()
# Copied and stripped from oioioi/base/utils/__init__.py
class ClassInitMeta(type):
"""Meta class triggering __classinit__ on class intialization."""
def __init__(cls, class_name, bases, new_attrs):
super(ClassInitMeta, cls).__init__(class_name, bases, new_attrs)
cls.__classinit__()
class ClassInitBase(six.with_metaclass(ClassInitMeta, object)):
"""Abstract base class injecting ClassInitMeta meta class."""
@classmethod
def __classinit__(cls):
"""
Empty __classinit__ implementation.
This must be a no-op as subclasses can't reliably call base class's
__classinit__ from their __classinit__s.
Subclasses of __classinit__ should look like:
.. python::
class MyClass(ClassInitBase):
@classmethod
def __classinit__(cls):
# Need globals().get as MyClass may be still undefined.
super(globals().get('MyClass', cls),
cls).__classinit__()
...
class Derived(MyClass):
@classmethod
def __classinit__(cls):
super(globals().get('Derived', cls),
cls).__classinit__()
...
"""
pass
class RegisteredSubclassesBase(ClassInitBase):
"""A base class for classes which should have a list of subclasses
available.
The list of subclasses is available in their :attr:`subclasses` class
attributes. Classes which have *explicitly* set :attr:`abstract` class
attribute to ``True`` are not added to :attr:`subclasses`.
    If the superclass defines :classmethod:`register_subclass` class
method, then it is called with subclass upon registration.
"""
@classmethod
def __classinit__(cls):
this_cls = globals().get('RegisteredSubclassesBase', cls)
super(this_cls, cls).__classinit__()
if this_cls is cls:
# This is RegisteredSubclassesBase class.
return
assert 'subclasses' not in cls.__dict__, (
'%s defines attribute subclasses, but has '
'RegisteredSubclassesMeta metaclass' % (cls,)
)
cls.subclasses = []
cls.abstract = cls.__dict__.get('abstract', False)
def find_superclass(cls):
superclasses = [c for c in cls.__bases__ if issubclass(c, this_cls)]
if not superclasses:
return None
if len(superclasses) > 1:
raise AssertionError(
'%s derives from more than one '
'RegisteredSubclassesBase' % (cls.__name__,)
)
superclass = superclasses[0]
return superclass
# Add the class to all superclasses' 'subclasses' attribute, including
# self.
superclass = cls
while superclass is not this_cls:
if not cls.abstract:
superclass.subclasses.append(cls)
if hasattr(superclass, 'register_subclass'):
superclass.register_subclass(cls)
superclass = find_superclass(superclass)
| gpl-3.0 | 2,986,877,489,215,617,500 | 26.657627 | 82 | 0.585611 | false |
guorendong/iridium-browser-ubuntu | tools/perf/page_sets/mse_cases.py | 11 | 2033 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class MseCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(MseCasesPage, self).__init__(url=url, page_set=page_set)
def RunNavigateSteps(self, action_runner):
super(MseCasesPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition('window.__testDone == true')
class MseCasesPageSet(page_set_module.PageSet):
""" Media source extensions perf benchmark """
def __init__(self):
super(MseCasesPageSet, self).__init__(bucket=page_set_module.PUBLIC_BUCKET)
urls_list = [
'file://mse_cases/startup_test.html?testType=AV',
'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=AV&doNotWaitForBodyOnLoad=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true&doNotWaitForBodyOnLoad=true',
'file://mse_cases/startup_test.html?testType=V',
'file://mse_cases/startup_test.html?testType=V&useAppendStream=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=V&doNotWaitForBodyOnLoad=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=V&useAppendStream=true&doNotWaitForBodyOnLoad=true',
'file://mse_cases/startup_test.html?testType=A',
'file://mse_cases/startup_test.html?testType=A&useAppendStream=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=A&doNotWaitForBodyOnLoad=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=A&useAppendStream=true&doNotWaitForBodyOnLoad=true',
]
for url in urls_list:
self.AddUserStory(MseCasesPage(url, self))
| bsd-3-clause | -5,869,827,624,236,792,000 | 42.255319 | 104 | 0.719626 | false |
quom/google-cloud-python | speech/unit_tests/test_operation.py | 3 | 4075 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestOperation(unittest.TestCase):
OPERATION_NAME = '123456789'
@staticmethod
def _get_target_class():
from google.cloud.speech.operation import Operation
return Operation
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def test_constructor(self):
client = object()
operation = self._make_one(
self.OPERATION_NAME, client)
self.assertEqual(operation.name, self.OPERATION_NAME)
self.assertIs(operation.client, client)
self.assertIsNone(operation.target)
self.assertIsNone(operation.response)
self.assertIsNone(operation.results)
self.assertIsNone(operation.error)
self.assertIsNone(operation.metadata)
self.assertEqual(operation.caller_metadata, {})
self.assertTrue(operation._from_grpc)
@staticmethod
def _make_result(transcript, confidence):
from google.cloud.grpc.speech.v1beta1 import cloud_speech_pb2
return cloud_speech_pb2.SpeechRecognitionResult(
alternatives=[
cloud_speech_pb2.SpeechRecognitionAlternative(
transcript=transcript,
confidence=confidence,
),
],
)
def _make_operation_pb(self, *results):
from google.cloud.grpc.speech.v1beta1 import cloud_speech_pb2
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
any_pb = None
if results:
result_pb = cloud_speech_pb2.AsyncRecognizeResponse(
results=results,
)
type_url = 'type.googleapis.com/%s' % (
result_pb.DESCRIPTOR.full_name,)
any_pb = Any(type_url=type_url,
value=result_pb.SerializeToString())
return operations_pb2.Operation(
name=self.OPERATION_NAME,
response=any_pb)
def test__update_state_no_response(self):
client = object()
operation = self._make_one(
self.OPERATION_NAME, client)
operation_pb = self._make_operation_pb()
operation._update_state(operation_pb)
self.assertIsNone(operation.response)
self.assertIsNone(operation.results)
def test__update_state_with_response(self):
from google.cloud.speech.alternative import Alternative
client = object()
operation = self._make_one(
self.OPERATION_NAME, client)
text = 'hi mom'
confidence = 0.75
result = self._make_result(text, confidence)
operation_pb = self._make_operation_pb(result)
operation._update_state(operation_pb)
self.assertIsNotNone(operation.response)
self.assertEqual(len(operation.results), 1)
alternative = operation.results[0]
self.assertIsInstance(alternative, Alternative)
self.assertEqual(alternative.transcript, text)
self.assertEqual(alternative.confidence, confidence)
def test__update_state_bad_response(self):
client = object()
operation = self._make_one(
self.OPERATION_NAME, client)
result1 = self._make_result('is this ok?', 0.625)
result2 = self._make_result('ease is ok', None)
operation_pb = self._make_operation_pb(result1, result2)
with self.assertRaises(ValueError):
operation._update_state(operation_pb)
| apache-2.0 | 7,644,044,069,742,198,000 | 34.434783 | 74 | 0.644663 | false |
andrewjrobinson/FreeCAD_sf_master | src/Mod/Web/Init.py | 3 | 2471 | # FreeCAD init script of the Web module
# (c) 2001 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
class WebDocument:
"Web document"
def Info(self):
return "Web document"
# Get the Parameter Group of this module
ParGrp = App.ParamGet("System parameter:Modules").GetGroup("Web")
# Set the needed information
ParGrp.SetString("HelpIndex", "Web/Help/index.html")
ParGrp.SetString("DocTemplateName", "Web")
ParGrp.SetString("DocTemplateScript","TemplWeb.py")
ParGrp.SetString("WorkBenchName", "Web Design")
ParGrp.SetString("WorkBenchModule", "WebWorkbench.py")
#FreeCAD.EndingAdd("CAD formats (*.igs *.iges *.step *.stp *.brep *.brp)","Web")
| lgpl-2.1 | 2,400,063,932,515,068,400 | 50.574468 | 80 | 0.460138 | false |
wcybxzj/django_xadmin17 | demo_app/polls/models.py | 1 | 1814 | import datetime
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self): # __unicode__ on Python 2
return self.question_text
def __unicode__(self):
return u"%s" % self.question_text
def was_published_recently(self):
now = timezone.now()
return now >= self.pub_date >= now - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self): # __unicode__ on Python 2
return self.choice_text
class Task(models.Model):
summary = models.CharField(max_length=32)
content = models.TextField()
reported_by = models.ForeignKey(User)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
permissions = (
('view_task', 'View task'),
)
class Post(models.Model):
title = models.CharField('title', max_length=64)
slug = models.SlugField(max_length=64)
content = models.TextField('content')
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
permissions = (
('view_post', 'Can view post'),
)
get_latest_by = 'created_at'
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return {'post_slug': self.slug} | bsd-3-clause | 5,847,153,851,304,024,000 | 28.274194 | 71 | 0.644432 | false |
redhat-openstack/manila | manila/api/auth.py | 4 | 1414 | # Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from manila.api.middleware import auth
from manila.i18n import _LW
LOG = log.getLogger(__name__)
class ManilaKeystoneContext(auth.ManilaKeystoneContext):
def __init__(self, application):
LOG.warn(_LW('manila.api.auth:ManilaKeystoneContext is deprecated. '
'Please use '
'manila.api.middleware.auth:ManilaKeystoneContext '
'instead.'))
super(ManilaKeystoneContext, self).__init__(application)
def pipeline_factory(loader, global_conf, **local_conf):
LOG.warn(_LW('manila.api.auth:pipeline_factory is deprecated. Please use '
'manila.api.middleware.auth:pipeline_factory instead.'))
auth.pipeline_factory(loader, global_conf, **local_conf)
| apache-2.0 | 3,575,692,297,736,211,000 | 37.216216 | 78 | 0.69024 | false |
joone/chromium-crosswalk | tools/metrics/histograms/print_style.py | 25 | 1809 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Holds the constants for pretty printing histograms.xml."""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import pretty_print_xml
# Desired order for tag attributes; attributes listed here will appear first,
# and in the same order as in these lists.
# { tag_name: [attribute_name, ...] }
ATTRIBUTE_ORDER = {
'enum': ['name', 'type'],
'histogram': ['name', 'enum', 'units'],
'int': ['value', 'label'],
'histogram_suffixes': ['name', 'separator', 'ordering'],
'suffix': ['name', 'label'],
'affected-histogram': ['name'],
'with-suffix': ['name'],
}
# Tag names for top-level nodes whose children we don't want to indent.
TAGS_THAT_DONT_INDENT = [
'histogram-configuration',
'histograms',
'histogram_suffixes_list',
'enums'
]
# Extra vertical spacing rules for special tag names.
# {tag_name: (newlines_after_open, newlines_before_close, newlines_after_close)}
TAGS_THAT_HAVE_EXTRA_NEWLINE = {
'histogram-configuration': (2, 1, 1),
'histograms': (2, 1, 1),
'histogram': (1, 1, 1),
'histogram_suffixes_list': (2, 1, 1),
'histogram_suffixes': (1, 1, 1),
'enums': (2, 1, 1),
'enum': (1, 1, 1),
}
# Tags that we allow to be squished into a single line for brevity.
TAGS_THAT_ALLOW_SINGLE_LINE = [
'summary',
'int',
'owner',
]
def GetPrintStyle():
"""Returns an XmlStyle object for pretty printing histograms."""
return pretty_print_xml.XmlStyle(ATTRIBUTE_ORDER,
TAGS_THAT_HAVE_EXTRA_NEWLINE,
TAGS_THAT_DONT_INDENT,
TAGS_THAT_ALLOW_SINGLE_LINE)
| bsd-3-clause | 6,914,631,645,876,960,000 | 29.661017 | 80 | 0.635158 | false |
citrix-openstack-build/nova | nova/api/openstack/compute/contrib/volumes.py | 11 | 24507 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import uuidutils
from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
authorize_attach = extensions.extension_authorizer('compute',
'volume_attachments')
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
d['metadata'] = vol.get('volume_metadata')
else:
d['metadata'] = {}
return d
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availabilityZone')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeType')
elem.set('snapshotId')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
vol = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
vol[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
vol['metadata'] = self.extract_metadata(metadata_node)
return vol
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = xmlutil.safe_minidom_parse_string(string)
vol = self._extract_volume(dom)
return {'body': {'volume': vol}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
self.volume_api.delete(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
vol = body['volume']
vol_type = vol.get('volume_type', None)
metadata = vol.get('metadata', None)
snapshot_id = vol.get('snapshot_id')
if snapshot_id is not None:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
else:
snapshot = None
size = vol.get('size', None)
if size is None and snapshot is not None:
size = snapshot['volume_size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
availability_zone = vol.get('availability_zone', None)
try:
new_volume = self.volume_api.create(
context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
except exception.InvalidInput as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def make_attachment(elem):
elem.set('id')
elem.set('serverId')
elem.set('volumeId')
elem.set('device')
class VolumeAttachmentTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachment',
selector='volumeAttachment')
make_attachment(root)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachments')
elem = xmlutil.SubTemplateElement(root, 'volumeAttachment',
selector='volumeAttachments')
make_attachment(elem)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self, ext_mgr=None):
self.compute_api = compute.API()
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeAttachmentController, self).__init__()
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
context = req.environ['nova.context']
authorize_attach(context, action='index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='show')
volume_id = id
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
assigned_mountpoint = None
for bdm in bdms:
if bdm['volume_id'] == volume_id:
assigned_mountpoint = bdm['device_name']
break
if assigned_mountpoint is None:
LOG.debug("volume_id not found")
raise exc.HTTPNotFound()
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance['uuid'],
assigned_mountpoint)}
def _validate_volume_id(self, volume_id):
if not uuidutils.is_uuid_like(volume_id):
msg = _("Bad volumeId format: volumeId is "
"not in proper format (%s)") % volume_id
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='create')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
self._validate_volume_id(volume_id)
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
"at %(device)s"),
{'volume_id': volume_id,
'device': device,
'server_id': server_id},
context=context)
try:
instance = self.compute_api.get(context, server_id)
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.NotFound:
raise exc.HTTPNotFound()
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'attach_volume')
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
def update(self, req, server_id, id, body):
if (not self.ext_mgr or
not self.ext_mgr.is_loaded('os-volume-attachment-update')):
raise exc.HTTPBadRequest()
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='update')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
old_volume_id = id
old_volume = self.volume_api.get(context, old_volume_id)
new_volume_id = body['volumeAttachment']['volumeId']
self._validate_volume_id(new_volume_id)
new_volume = self.volume_api.get(context, new_volume_id)
try:
instance = self.compute_api.get(context, server_id,
want_objects=True)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
found = False
try:
for bdm in bdms:
if bdm['volume_id'] != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume')
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='delete')
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
volume = self.volume_api.get(context, volume_id)
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
found = False
try:
for bdm in bdms:
if bdm['volume_id'] != volume_id:
continue
try:
self.compute_api.detach_volume(context, instance, volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'detach_volume')
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm['volume_id']:
results.append(entity_maker(bdm['volume_id'],
bdm['instance_uuid'],
bdm['device_name']))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
def make_snapshot(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeId')
class SnapshotTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
return xmlutil.MasterTemplate(root, 1)
class SnapshotController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(SnapshotController, self).__init__()
@wsgi.serializers(xml=SnapshotTemplate)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
return exc.HTTPNotFound()
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
try:
self.volume_api.delete_snapshot(context, id)
except exception.NotFound:
return exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=SnapshotsTemplate)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.serializers(xml=SnapshotsTemplate)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.serializers(xml=SnapshotTemplate)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
LOG.audit(_("Create snapshot from volume %s"), volume_id,
context=context)
force = snapshot.get('force', False)
try:
force = strutils.bool_from_string(force, strict=True)
except ValueError:
msg = _("Invalid value '%s' for force.") % force
raise exception.InvalidParameterValue(err=msg)
if force:
create_func = self.volume_api.create_snapshot_force
else:
create_func = self.volume_api.create_snapshot
new_snapshot = create_func(context, volume_id,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
class Volumes(extensions.ExtensionDescriptor):
"""Volumes support."""
name = "Volumes"
alias = "os-volumes"
namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1"
updated = "2011-03-25T00:00:00+00:00"
def get_resources(self):
resources = []
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
attachment_controller = VolumeAttachmentController(self.ext_mgr)
res = extensions.ResourceExtension('os-volume_attachments',
attachment_controller,
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-snapshots',
SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
| apache-2.0 | -7,660,357,670,572,134,000 | 32.943213 | 79 | 0.599869 | false |
aetilley/scikit-learn | sklearn/datasets/samples_generator.py | 45 | 56433 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
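    Examples
    --------
    A minimal, shape-only sketch; the generated values depend on the
    random state:
    >>> from sklearn.datasets.samples_generator import make_classification
    >>> X, y = make_classification(n_samples=10, random_state=0)
    >>> print(X.shape)
    (10, 20)
    >>> print(y.shape)
    (10,)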
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
    In the above process, rejection sampling is used to make sure that
    n is never more than `n_classes` (and never zero unless
    ``allow_unlabeled`` is True), and that the document length is never
    zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
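    Examples
    --------
    A small illustrative call, checking only the output shapes (the label
    assignments themselves depend on the random state):
    >>> from sklearn.datasets.samples_generator import (
    ...     make_multilabel_classification)
    >>> X, Y = make_multilabel_classification(n_samples=5, n_classes=3,
    ...                                       return_indicator=True,
    ...                                       random_state=0)
    >>> print(X.shape)
    (5, 20)
    >>> print(Y.shape)
    (5, 3)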
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
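    Examples
    --------
    A minimal usage sketch; only the shapes are shown here:
    >>> from sklearn.datasets.samples_generator import make_hastie_10_2
    >>> X, y = make_hastie_10_2(n_samples=24, random_state=0)
    >>> print(X.shape)
    (24, 10)
    >>> print(y.shape)
    (24,)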
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input, and then adding centered Gaussian noise with adjustable
    scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
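    Examples
    --------
    A small, shape-only sketch (the data itself depends on the random
    state):
    >>> from sklearn.datasets.samples_generator import make_regression
    >>> X, y, w = make_regression(n_samples=10, n_features=3, coef=True,
    ...                           random_state=0)
    >>> print(X.shape)
    (10, 3)
    >>> print(y.shape)
    (10,)
    >>> print(w.shape)
    (3,)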
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
    # by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
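    Examples
    --------
    A minimal sketch; only the output shapes are checked:
    >>> from sklearn.datasets.samples_generator import make_circles
    >>> X, y = make_circles(n_samples=10, random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> print(y.shape)
    (10,)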
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
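    Examples
    --------
    A small shape-only example:
    >>> from sklearn.datasets.samples_generator import make_moons
    >>> X, y = make_moons(n_samples=10, random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> print(y.shape)
    (10,)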
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
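    Examples
    --------
    A small, shape-only sketch:
    >>> from sklearn.datasets.samples_generator import make_friedman1
    >>> X, y = make_friedman1(n_samples=10, random_state=0)
    >>> print(X.shape)
    (10, 10)
    >>> print(y.shape)
    (10,)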
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
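    Examples
    --------
    A minimal sketch showing the returned shape:
    >>> from sklearn.datasets.samples_generator import make_low_rank_matrix
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> print(X.shape)
    (50, 25)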
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
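    Examples
    --------
    A small sketch showing how the returned shapes relate to the
    parameters:
    >>> from sklearn.datasets.samples_generator import (
    ...     make_sparse_coded_signal)
    >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=8,
    ...                                    n_features=5, n_nonzero_coefs=3,
    ...                                    random_state=0)
    >>> print(Y.shape)
    (5, 10)
    >>> print(D.shape)
    (5, 8)
    >>> print(X.shape)
    (8, 10)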
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
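    Examples
    --------
    A minimal, shape-only sketch:
    >>> from sklearn.datasets.samples_generator import (
    ...     make_sparse_uncorrelated)
    >>> X, y = make_sparse_uncorrelated(n_samples=20, n_features=6,
    ...                                 random_state=0)
    >>> print(X.shape)
    (20, 6)
    >>> print(y.shape)
    (20,)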
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
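    Examples
    --------
    A minimal sketch; the entries depend on the random state:
    >>> from sklearn.datasets.samples_generator import make_spd_matrix
    >>> X = make_spd_matrix(n_dim=3, random_state=0)
    >>> print(X.shape)
    (3, 3)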
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
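    Examples
    --------
    A minimal sketch; only the shape is shown, since the sparsity pattern
    depends on the random state:
    >>> from sklearn.datasets.samples_generator import (
    ...     make_sparse_spd_matrix)
    >>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
    >>> print(prec.shape)
    (4, 4)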
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
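    Examples
    --------
    A small, shape-only sketch:
    >>> from sklearn.datasets.samples_generator import make_swiss_roll
    >>> X, t = make_swiss_roll(n_samples=10, random_state=0)
    >>> print(X.shape)
    (10, 3)
    >>> print(t.shape)
    (10,)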
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
        The covariance matrix will be this value times the identity matrix.
        This dataset only produces isotropic (spherically symmetric) normal
        distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
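    Examples
    --------
    A minimal sketch; only the output shapes are checked:
    >>> from sklearn.datasets.samples_generator import (
    ...     make_gaussian_quantiles)
    >>> X, y = make_gaussian_quantiles(n_samples=9, n_classes=3,
    ...                                random_state=0)
    >>> print(X.shape)
    (9, 2)
    >>> print(y.shape)
    (9,)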
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
    # Sort samples by squared distance from the mean
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
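    Examples
    --------
    A small sketch relating the returned shapes to the requested shape and
    number of biclusters:
    >>> from sklearn.datasets.samples_generator import make_biclusters
    >>> X, rows, cols = make_biclusters(shape=(10, 8), n_clusters=2,
    ...                                 random_state=0)
    >>> print(X.shape)
    (10, 8)
    >>> print(rows.shape)
    (2, 10)
    >>> print(cols.shape)
    (2, 8)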
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
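    Examples
    --------
    A small sketch; with (2, 2) row and column clusters there are four
    biclusters in total:
    >>> from sklearn.datasets.samples_generator import make_checkerboard
    >>> X, rows, cols = make_checkerboard(shape=(8, 8), n_clusters=(2, 2),
    ...                                   random_state=0)
    >>> print(X.shape)
    (8, 8)
    >>> print(rows.shape)
    (4, 8)
    >>> print(cols.shape)
    (4, 8)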
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause | 7,597,670,310,769,805,000 | 33.326642 | 79 | 0.616714 | false |
timsnyder/bokeh | tests/integration/tools/test_poly_draw_tool.py | 5 | 9020 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import time
# External imports
# Bokeh imports
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomAction, CustomJS, Plot, Range1d, MultiLine, Circle, PolyDrawTool, Div
from bokeh._testing.util.compare import cds_data_almost_equal
from bokeh._testing.util.selenium import RECORD
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.bokeh",
)
def _make_plot(num_objects=0, drag=True, vertices=False):
source = ColumnDataSource(dict(xs=[[1, 2]], ys=[[1, 1]]))
plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 3), y_range=Range1d(0, 3), min_border=0)
renderer = plot.add_glyph(source, MultiLine(xs='xs', ys='ys'))
tool = PolyDrawTool(num_objects=num_objects, drag=drag, renderers=[renderer])
if vertices:
psource = ColumnDataSource(dict(x=[], y=[]))
prenderer = plot.add_glyph(psource, Circle(x='x', y='y', size=10))
tool.vertex_renderer = prenderer
plot.add_tools(tool)
plot.toolbar.active_multi = tool
code = RECORD("xs", "source.data.xs") + RECORD("ys", "source.data.ys")
plot.add_tools(CustomAction(callback=CustomJS(args=dict(source=source), code=code)))
plot.toolbar_sticky = False
return plot
def _make_server_plot(expected):
def modify_doc(doc):
source = ColumnDataSource(dict(xs=[[1, 2]], ys=[[1, 1]]))
plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 3), y_range=Range1d(0, 3), min_border=0)
renderer = plot.add_glyph(source, MultiLine(xs='xs', ys='ys'))
tool = PolyDrawTool(renderers=[renderer])
plot.add_tools(tool)
plot.toolbar.active_multi = tool
div = Div(text='False')
def cb(attr, old, new):
if cds_data_almost_equal(new, expected):
div.text = 'True'
source.on_change('data', cb)
code = RECORD("matches", "div.text")
plot.add_tools(CustomAction(callback=CustomJS(args=dict(div=div), code=code)))
doc.add_root(column(plot, div))
return modify_doc
@pytest.mark.integration
@pytest.mark.selenium
class Test_PolyDrawTool(object):
def test_selected_by_default(self, single_plot_page):
plot = _make_plot()
page = single_plot_page(plot)
button = page.get_toolbar_button('poly-draw')
assert 'active' in button.get_attribute('class')
assert page.has_no_console_errors()
def test_can_be_deselected_and_selected(self, single_plot_page):
plot = _make_plot()
page = single_plot_page(plot)
# Check is active
button = page.get_toolbar_button('poly-draw')
assert 'active' in button.get_attribute('class')
# Click and check is not active
button = page.get_toolbar_button('poly-draw')
button.click()
assert 'active' not in button.get_attribute('class')
# Click again and check is active
button = page.get_toolbar_button('poly-draw')
button.click()
assert 'active' in button.get_attribute('class')
assert page.has_no_console_errors()
def test_double_click_triggers_draw(self, single_plot_page):
plot = _make_plot()
page = single_plot_page(plot)
# ensure double clicking adds a poly
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.5)
page.click_custom_action()
expected = {"xs": [[1, 2], [1.6216216216216217, 2.4324324324324325]],
"ys": [[1, 1], [1.5, 0.75]]}
assert cds_data_almost_equal(page.results, expected)
assert page.has_no_console_errors()
def test_click_snaps_to_vertex(self, single_plot_page):
plot = _make_plot(vertices=True)
page = single_plot_page(plot)
# ensure double clicking adds a poly
page.double_click_canvas_at_position(200, 200)
page.click_canvas_at_position(300, 300)
time.sleep(0.5)
page.double_click_canvas_at_position(201, 201)
time.sleep(0.5)
page.click_custom_action()
expected = {"xs": [[1, 2], [1.6216216216216217, 2.4324324324324325, 1.6216216216216217]],
"ys": [[1, 1], [1.5, 0.75, 1.5]]}
assert cds_data_almost_equal(page.results, expected)
assert page.has_no_console_errors()
def test_drag_moves_multi_line(self, single_plot_page):
plot = _make_plot()
page = single_plot_page(plot)
# ensure clicking adds a point
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.4) # hammerJS click timeout
page.drag_canvas_at_position(200, 200, 70, 50)
page.click_custom_action()
expected = {"xs": [[1, 2], [2.1891891891891895, 3]],
"ys": [[1, 1], [1.125, 0.375]]}
assert cds_data_almost_equal(page.results, expected)
assert page.has_no_console_errors()
def test_drag_does_not_move_multi_line(self, single_plot_page):
plot = _make_plot(drag=False)
page = single_plot_page(plot)
# ensure clicking adds a point
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.4) # hammerJS click timeout
page.drag_canvas_at_position(200, 200, 70, 53)
page.click_custom_action()
expected = {"xs": [[1, 2], [1.6216216216216217, 2.4324324324324325]],
"ys": [[1, 1], [1.5, 0.75]] }
assert cds_data_almost_equal(page.results, expected)
assert page.has_no_console_errors()
def test_num_object_limits_multi_lines(self, single_plot_page):
plot = _make_plot(num_objects=1)
page = single_plot_page(plot)
# ensure clicking adds a point
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.4) # hammerJS click timeout
page.drag_canvas_at_position(200, 200, 70, 50)
page.click_custom_action()
expected = {"xs": [[2.1891891891891895, 3]],
"ys": [[1.125, 0.375]]}
assert cds_data_almost_equal(page.results, expected)
assert page.has_no_console_errors()
def test_poly_draw_syncs_to_server(self, bokeh_server_page):
expected = {"xs": [[1, 2], [1.6216216216216217, 2.4324324324324325]],
"ys": [[1, 1], [1.5, 0.75]]}
page = bokeh_server_page(_make_server_plot(expected))
# ensure double clicking adds a poly
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.5)
page.click_custom_action()
assert page.results == {"matches": "True"}
def test_poly_drag_syncs_to_server(self, bokeh_server_page):
expected = {"xs": [[1, 2], [2.1891891891891895, 3]],
"ys": [[1, 1], [1.125, 0.375]]}
page = bokeh_server_page(_make_server_plot(expected))
# ensure dragging move multi_line
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.4) # hammerJS click timeout
page.drag_canvas_at_position(200, 200, 70, 50)
page.click_custom_action()
assert page.results == {"matches": "True"}
def test_poly_delete_syncs_to_server(self, bokeh_server_page):
expected = {"xs": [[1, 2]],
"ys": [[1, 1]]}
page = bokeh_server_page(_make_server_plot(expected))
page.double_click_canvas_at_position(200, 200)
page.double_click_canvas_at_position(300, 300)
time.sleep(0.4) # hammerJS click timeout
page.click_canvas_at_position(200, 200)
time.sleep(0.4) # hammerJS click timeout
page.send_keys(u'\ue003') # Backspace
time.sleep(0.4) # hammerJS click timeout
page.click_custom_action()
assert page.results == {"matches": "True"}
| bsd-3-clause | -434,002,051,971,621,700 | 36.119342 | 118 | 0.575942 | false |
selpp/TidmarshCWD | src/web/website/settings.py | 1 | 3708 | """
Django settings for website project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import os.path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%r-8co1%9%^^b_9br$us$u@(db_jp2oy3tuq!eywwhc_sw&8c6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tidmarsh',
'channels',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['tidmarsh/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
#
# CHANNEL_LAYERS = {
# "default":{
# "BACKEND": "asgi_redis.RedisChannelLayer",
# "CONFIG": {
# "hosts": [os.environ.get('REDIS_URL','redis://localhost:6379')],
# },
# "ROUTING": "website.routing.channel_routing",
# },
# }
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "website.routing.channel_routing",
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join('static'),
)
| gpl-3.0 | -9,157,656,981,170,509,000 | 24.22449 | 91 | 0.666127 | false |
leilihh/nova | nova/db/sqlalchemy/migrate_repo/versions/228_add_metrics_in_compute_nodes.py | 47 | 1583 | # Copyright 2013 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Add a new column metrics to save metrics info for compute nodes
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
metrics = Column('metrics', Text, nullable=True)
shadow_metrics = Column('metrics', Text, nullable=True)
compute_nodes.create_column(metrics)
shadow_compute_nodes.create_column(shadow_metrics)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Remove the new column
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
compute_nodes.drop_column('metrics')
shadow_compute_nodes.drop_column('metrics')
| apache-2.0 | 3,904,094,944,710,098,400 | 34.977273 | 78 | 0.727732 | false |
addgene/research | toolkit/utils.py | 1 | 1283 | import argparse
import logging
import os
import sys
from Bio import SeqIO
from setuptools import glob
L = logging.getLogger(__name__)
def read_fastq_file(file_name):
# type: (str) -> List[str]
reads = []
with open(file_name, "rU") as handle:
for record in SeqIO.parse(handle, "fastq"):
reads.append(str(record.seq))
reads.append(str(record.seq.reverse_complement()))
return reads
def setup_dirs(params):
input = params.input_folder
output = params.output_folder
if not os.path.isdir(input):
raise ValueError('The folder ' + input + ' does not exist.')
if not os.path.isdir(output):
L.info('\nThe folder ' + output + ' does not exist. Creating it...\n')
os.mkdir(output)
def get_fastq_files(params):
# type: (object) -> List[str]
# grab all files ending in .fastq
input_files = [input_file for input_file in glob.glob("{}/*.fastq".format(params.input_folder))]
if not input_files:
raise ValueError('No FASTQ files in folder: ' + params.input_folder)
return input_files
def log_to_stdout(level):
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(level) | gpl-3.0 | 8,681,008,888,895,365,000 | 25.204082 | 100 | 0.654716 | false |
tom31203120/POSTMan-Chrome-Extension | tests/selenium/pmtests/postman_tests_layout.py | 104 | 1075 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
import inspect
import time
from postman_tests import PostmanTests
class PostmanTestsLayout(PostmanTests):
def test_1_toggle_sidebar(self):
sidebar_toggle = self.browser.find_element_by_id("sidebar-toggle")
sidebar_toggle.click()
time.sleep(1)
sidebar = self.browser.find_element_by_id("sidebar")
sidebar_style = sidebar.get_attribute("style")
if sidebar_style.find("5px") < 0:
self.print_failed("test_toggle_sidebar")
else:
sidebar_toggle.click()
time.sleep(1)
sidebar_style = sidebar.get_attribute("style")
if sidebar_style.find("350px") > 0:
return True
else:
return False
PostmanTestsLayout().run()
| apache-2.0 | -3,393,964,256,090,353,000 | 33.677419 | 74 | 0.67907 | false |
jmetzen/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause | -6,200,865,334,653,854,000 | 20.938462 | 66 | 0.614306 | false |
hoxmark/TDT4501-Specialization-Project | vse/data.py | 1 | 13775 | import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import os
import nltk
from PIL import Image
# from pycocotools.coco import COCO
import numpy as np
import json as jsonmod
def get_paths(path, name='coco', use_restval=False):
"""
Returns paths to images and annotations for the given datasets. For MSCOCO
indices are also returned to control the data split being used.
The indices are extracted from the Karpathy et al. splits using this
snippet:
>>> import json
    >>> D=json.load(open('dataset_coco.json','r'))
>>> A=[]
>>> for i in range(len(D['images'])):
... if D['images'][i]['split'] == 'val':
... A+=D['images'][i]['sentids'][:5]
...
:param name: Dataset names
    :param use_restval: If True, the `restval` data is included in train.
"""
roots = {}
ids = {}
if 'coco' == name:
imgdir = os.path.join(path, 'images')
capdir = os.path.join(path, 'annotations')
roots['train'] = {
'img': os.path.join(imgdir, 'train2014'),
'cap': os.path.join(capdir, 'captions_train2014.json')
}
roots['val'] = {
'img': os.path.join(imgdir, 'val2014'),
'cap': os.path.join(capdir, 'captions_val2014.json')
}
roots['test'] = {
'img': os.path.join(imgdir, 'val2014'),
'cap': os.path.join(capdir, 'captions_val2014.json')
}
roots['trainrestval'] = {
'img': (roots['train']['img'], roots['val']['img']),
'cap': (roots['train']['cap'], roots['val']['cap'])
}
ids['train'] = np.load(os.path.join(capdir, 'coco_train_ids.npy'))
ids['val'] = np.load(os.path.join(capdir, 'coco_dev_ids.npy'))[:5000]
ids['test'] = np.load(os.path.join(capdir, 'coco_test_ids.npy'))
ids['trainrestval'] = (
ids['train'],
np.load(os.path.join(capdir, 'coco_restval_ids.npy')))
if use_restval:
roots['train'] = roots['trainrestval']
ids['train'] = ids['trainrestval']
elif 'f8k' == name:
imgdir = os.path.join(path, 'images')
cap = os.path.join(path, 'dataset_flickr8k.json')
roots['train'] = {'img': imgdir, 'cap': cap}
roots['val'] = {'img': imgdir, 'cap': cap}
roots['test'] = {'img': imgdir, 'cap': cap}
ids = {'train': None, 'val': None, 'test': None}
elif 'f30k' == name:
imgdir = os.path.join(path, 'images')
cap = os.path.join(path, 'dataset_flickr30k.json')
roots['train'] = {'img': imgdir, 'cap': cap}
roots['val'] = {'img': imgdir, 'cap': cap}
roots['test'] = {'img': imgdir, 'cap': cap}
ids = {'train': None, 'val': None, 'test': None}
return roots, ids
class FlickrDataset(data.Dataset):
"""
Dataset loader for Flickr30k and Flickr8k full datasets.
"""
def __init__(self, root, json, split, vocab, transform=None):
self.root = root
self.vocab = vocab
self.split = split
self.transform = transform
self.dataset = jsonmod.load(open(json, 'r'))['images']
self.ids = []
for i, d in enumerate(self.dataset):
if d['split'] == split:
self.ids += [(i, x) for x in range(len(d['sentences']))]
def __getitem__(self, index):
"""This function returns a tuple that is further passed to collate_fn
"""
vocab = self.vocab
root = self.root
ann_id = self.ids[index]
img_id = ann_id[0]
caption = self.dataset[img_id]['sentences'][ann_id[1]]['raw']
path = self.dataset[img_id]['filename']
image = Image.open(os.path.join(root, path)).convert('RGB')
if self.transform is not None:
image = self.transform(image)
# Convert caption (string) to word ids.
tokens = nltk.tokenize.word_tokenize(
str(caption).lower().decode('utf-8'))
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return image, target, index, img_id
def __len__(self):
return len(self.ids)
class PrecompDataset(data.Dataset):
"""
Load precomputed captions and image features
Possible options: f8k, f30k, coco, 10crop
"""
def __init__(self, data_path, data_split, vocab):
self.vocab = vocab
loc = data_path + '/'
# Captions
self.captions = []
with open(loc+'%s_caps.txt' % data_split, 'rb') as f:
for line in f:
self.captions.append(line.strip())
# Image features
self.images = np.load(loc+'%s_ims.npy' % data_split)
self.length = len(self.captions)
# rkiros data has redundancy in images, we divide by 5, 10crop doesn't
if self.images.shape[0] != self.length:
self.im_div = 5
else:
self.im_div = 1
# the development set for coco is large and so validation would be slow
if data_split == 'dev':
self.length = 5000
def delete_indices(self, indices):
self.images = np.delete(self.images, indices, axis=0)
self.captions = np.delete(self.captions, indices, axis=0)
self.length = len(self.captions)
def __getitem__(self, index):
# handle the image redundancy
img_id = int(index/self.im_div)
image = torch.Tensor(self.images[img_id])
caption = self.captions[index]
vocab = self.vocab
# Convert caption (string) to word ids.
tokens = nltk.tokenize.word_tokenize(
str(caption).lower().decode('utf-8'))
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return image, target, index, img_id
def __len__(self):
return self.length
class ActiveDataset(data.Dataset):
"""
Initially empty dataset to contain the train
data used for active learning.
"""
def __init__(self, vocab):
self.captions = []
self.images = []
self.length = len(self.captions)
self.vocab = vocab
def __getitem__(self, index):
image = torch.Tensor(self.images[index])
caption = self.captions[index]
target = torch.Tensor(caption)
return image, target, index, index
def __len__(self):
# return self.length
return len(self.captions)
def add_single(self, image, caption):
self.images.append(image)
self.captions.append(caption)
# self.length = len(self.captions)
def add_multiple(self, images, captions):
self.images.extend(images)
self.captions.extend(captions)
# self.length = len(self.captions)
def collate_fn(data):
"""Build mini-batch tensors from a list of (image, caption) tuples.
Args:
data: list of (image, caption) tuple.
- image: torch tensor of shape (3, 256, 256).
- caption: torch tensor of shape (?); variable length.
Returns:
images: torch tensor of shape (batch_size, 3, 256, 256).
targets: torch tensor of shape (batch_size, padded_length).
lengths: list; valid length for each padded caption.
"""
# Sort a data list by caption length
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions, ids, img_ids = zip(*data)
# Merge images (convert tuple of 3D tensor to 4D tensor)
images = torch.stack(images, 0)
    # Merge captions (convert tuple of 1D tensor to 2D tensor)
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths, ids
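# A small worked illustration with toy values: if a batch holds two captions of
# lengths 3 and 5, collate_fn sorts the pairs longest-first, so
#     images, targets, lengths, ids = collate_fn(batch)
#     # targets.shape == (2, 5); targets[1, 3:] is zero padding; lengths == [5, 3]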
def get_loader_single(data_name, split, root, json, vocab, transform,
batch_size=100, shuffle=True,
num_workers=2, ids=None, collate_fn=collate_fn):
# """Returns torch.utils.data.DataLoader for custom coco dataset."""
# if 'coco' in data_name:
# # COCO custom dataset
# dataset = CocoDataset(root=root,
# json=json,
# vocab=vocab,
# transform=transform, ids=ids)
if 'f8k' in data_name or 'f30k' in data_name:
dataset = FlickrDataset(root=root,
split=split,
json=json,
vocab=vocab,
transform=transform)
# Data loader
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader
def get_precomp_loader(data_path, data_split, vocab, opt, batch_size=100,
shuffle=True, num_workers=2):
"""Returns torch.utils.data.DataLoader for custom coco dataset."""
dset = PrecompDataset(data_path, data_split, vocab)
data_loader = torch.utils.data.DataLoader(dataset=dset,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=True,
collate_fn=collate_fn)
return data_loader
def get_active_loader(vocab, batch_size=100, shuffle=True, num_workers=2):
dset = ActiveDataset(vocab)
data_loader = torch.utils.data.DataLoader(dataset=dset,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=True,
collate_fn=collate_fn)
return data_loader
def get_transform(data_name, split_name, opt):
normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
t_list = []
if split_name == 'train':
t_list = [transforms.RandomResizedCrop(opt.crop_size),
transforms.RandomHorizontalFlip()]
elif split_name == 'val':
t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
elif split_name == 'test':
t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
t_end = [transforms.ToTensor(), normalizer]
transform = transforms.Compose(t_list + t_end)
return transform
def get_loaders(data_name, vocab, crop_size, batch_size, workers, opt):
dpath = os.path.join(opt.data_path, data_name)
if opt.data_name.endswith('_precomp'):
train_loader = get_precomp_loader(dpath, 'train', vocab, opt,
batch_size, True, workers)
val_loader = get_precomp_loader(dpath, 'dev', vocab, opt,
batch_size, False, workers)
active_loader = get_active_loader(vocab)
else:
# Build Dataset Loader
roots, ids = get_paths(dpath, data_name, opt.use_restval)
transform = get_transform(data_name, 'train', opt)
train_loader = get_loader_single(opt.data_name, 'train',
roots['train']['img'],
roots['train']['cap'],
vocab, transform, ids=ids['train'],
batch_size=batch_size, shuffle=True,
num_workers=workers,
collate_fn=collate_fn)
transform = get_transform(data_name, 'val', opt)
val_loader = get_loader_single(opt.data_name, 'val',
roots['val']['img'],
roots['val']['cap'],
vocab, transform, ids=ids['val'],
batch_size=batch_size, shuffle=False,
num_workers=workers,
collate_fn=collate_fn)
active_loader = get_active_loader(vocab)
return active_loader, train_loader, val_loader
def get_test_loader(split_name, data_name, vocab, crop_size, batch_size,
workers, opt):
dpath = os.path.join(opt.data_path, data_name)
if opt.data_name.endswith('_precomp'):
test_loader = get_precomp_loader(dpath, split_name, vocab, opt,
batch_size, False, workers)
else:
# Build Dataset Loader
roots, ids = get_paths(dpath, data_name, opt.use_restval)
transform = get_transform(data_name, split_name, opt)
test_loader = get_loader_single(opt.data_name, split_name,
roots[split_name]['img'],
roots[split_name]['cap'],
vocab, transform, ids=ids[split_name],
batch_size=batch_size, shuffle=False,
num_workers=workers,
collate_fn=collate_fn)
return test_loader
| mit | -8,470,512,414,975,171,000 | 37.370474 | 79 | 0.531543 | false |
olivierdalang/stdm | data/importexport/writer.py | 1 | 5017 | #begin: 26th March 2012
#copyright: (c) 2012 by John Gitau
#email: [email protected]
#about: Wrapper class for writing PostgreSQL/PostGIS tables to user-defined OGR formats
import sys, logging
from PyQt4.QtCore import *
from PyQt4.QtGui import *
try:
from osgeo import gdal
from osgeo import ogr
except:
import gdal
import ogr
from stdm.data import (
columnType,
geometryType
)
from enums import *
class OGRWriter():
def __init__(self,targetFile):
self._ds=None
self._targetFile = targetFile
def reset(self):
#Destroy
self._ds = None
def getDriverName(self):
#Return the name of the driver derived from the file extension
fi = QFileInfo(self._targetFile)
fileExt = str(fi.suffix())
return drivers[fileExt]
def getLayerName(self):
#Simply derived from the file name
fi = QFileInfo(self._targetFile)
return str(fi.baseName())
def createField(self,table,field):
#Creates an OGR field
colType = columnType(table,field)
#Get OGR type
ogrType = ogrTypes[colType]
field_defn = ogr.FieldDefn(field,ogrType)
return field_defn
def db2Feat(self,parent,table,results,columns,geom=""):
#Execute the export process
#Create driver
drv = ogr.GetDriverByName(self.getDriverName())
if drv is None:
raise Exception("{0} driver not available.".format(self.getDriverName()))
#Create data source
self._ds = drv.CreateDataSource(self._targetFile)
if self._ds is None:
raise Exception("Creation of output file failed.")
#Create layer
if geom != "":
pgGeomType,srid = geometryType(table,geom)
geomType = wkbTypes[pgGeomType]
else:
geomType=ogr.wkbNone
lyr = self._ds.CreateLayer(self.getLayerName(),None,geomType)
if lyr is None:
raise Exception("Layer creation failed")
#Create fields
for c in columns:
            #SQLAlchemy string values are in unicode so encoding is required in order to use in OGR
encodedFieldName = c.encode('utf-8')
field_defn = self.createField(table,encodedFieldName)
if lyr.CreateField(field_defn) != 0:
raise Exception("Creating %s field failed"%(c))
#Add Geometry column to list for referencing in the result set
if geom != "":
columns.append(geom)
featGeom=None
#Configure progress dialog
initVal=0
numFeat = results.rowcount
progress = QProgressDialog("","&Cancel",initVal,numFeat,parent)
progress.setWindowModality(Qt.WindowModal)
lblMsgTemp = "Writing {0} of {1} to file..."
#Iterate the result set
for r in results:
#Progress dialog
progress.setValue(initVal)
progressMsg = lblMsgTemp.format(str(initVal+1),str(numFeat))
progress.setLabelText(progressMsg)
if progress.wasCanceled():
break
#Create OGR Feature
feat = ogr.Feature(lyr.GetLayerDefn())
for i in range(len(columns)):
colName = columns[i]
#Check if its the geometry column in the iteration
if colName==geom:
if r[i] is not None:
featGeom=ogr.CreateGeometryFromWkt(r[i])
else:
featGeom=ogr.CreateGeometryFromWkt("")
feat.SetGeometry(featGeom)
else:
fieldValue = r[i]
if isinstance(fieldValue,unicode):
fieldValue = fieldValue.encode('utf-8')
feat.SetField(i,fieldValue)
if lyr.CreateFeature(feat) != 0:
progress.close()
raise Exception("Failed to create feature in %s"%(self._targetFile))
if featGeom is not None:
featGeom.Destroy()
feat.Destroy()
initVal+=1
progress.setValue(numFeat)
| gpl-2.0 | 8,475,736,998,902,070,000 | 31.019737 | 119 | 0.484353 | false |
docprofsky/gr-rds | python/rdspanel.py | 2 | 7742 | # -*- coding: UTF-8 -*-
import wx
import pmt
from gnuradio import gr, blocks
wxDATA_EVENT = wx.NewEventType()
def EVT_DATA_EVENT(win, func):
win.Connect(-1, -1, wxDATA_EVENT, func)
class DataEvent(wx.PyEvent):
def __init__(self, data):
wx.PyEvent.__init__(self)
self.SetEventType (wxDATA_EVENT)
self.data = data
def Clone (self):
self.__class__ (self.GetId())
class rdsPanel(gr.sync_block):
def __init__(self, freq, *args, **kwds):
gr.sync_block.__init__(
self,
name = "rds_panel",
in_sig = None,
out_sig = None,
)
self.message_port_register_in(pmt.intern('in'))
self.set_msg_handler(pmt.intern('in'), self.handle_msg)
self.panel = rdsWxPanel(freq, *args, **kwds);
def handle_msg(self, msg):
if(pmt.is_tuple(msg)):
t = pmt.to_long(pmt.tuple_ref(msg, 0))
m = pmt.symbol_to_string(pmt.tuple_ref(msg, 1))
de = DataEvent([t, m])
wx.PostEvent(self.panel, de)
del de
def set_frequency(self, freq=None):
freq_str = "xxx.xx"
if freq is not None:
if isinstance(freq, float) or isinstance(freq, int):
freq_str = "%.2f" % (float(freq) / 1e6)
else:
freq_str = str(freq)
de = DataEvent([7, freq_str])
wx.PostEvent(self.panel, de)
del de
class rdsWxPanel(wx.Panel):
def __init__(self, freq, *args, **kwds):
kwds["style"] = wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.label_1 = wx.StaticText(self, -1, "Frequency")
self.label_2 = wx.StaticText(self, -1, "Station Name")
self.label_3 = wx.StaticText(self, -1, "Program Type")
self.label_4 = wx.StaticText(self, -1, "PI")
self.label_5 = wx.StaticText(self, -1, "Radio Text")
self.label_6 = wx.StaticText(self, -1, "Clock Time")
self.label_7 = wx.StaticText(self, -1, "Alt. Frequencies")
self.frequency = wx.StaticText(self, -1, "xxx.xx")
self.station_name = wx.StaticText(self, -1, "xxxxxxxx")
self.program_type = wx.StaticText(self, -1, "xxxxxxxxxxx")
self.program_information = wx.StaticText(self, -1, "xxxx")
self.tp_flag = wx.StaticText(self, -1, "TP")
self.ta_flag = wx.StaticText(self, -1, "TA")
self.musicspeech_flag = wx.StaticText(self, -1, "MUS/SPE")
self.monostereo_flag = wx.StaticText(self, -1, "MN/ST")
self.artificialhead_flag = wx.StaticText(self, -1, "AH")
self.compressed_flag = wx.StaticText(self, -1, "CMP")
self.staticpty_flag = wx.StaticText(self, -1, "stPTY")
self.radiotext = wx.StaticText(self, -1, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
self.clocktime = wx.StaticText(self, -1, "xxxxxxxxxxxxxxxxxxxxx")
self.alt_freq = wx.StaticText(self, -1, "xxxxxxxxxxxxxxx")
self.__set_properties()
self.__do_layout()
if isinstance(freq, float) or isinstance(freq, int):
freq_str = "%.2f" % (float(freq) / 1e6)
else:
freq_str = str(freq)
self.frequency.SetLabel(freq_str)
EVT_DATA_EVENT (self, self.display_data)
def __set_properties(self):
font_bold = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, "")
font_normal = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "")
font_small = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, "")
self.frequency.SetFont(font_bold)
self.station_name.SetFont(font_bold)
self.program_type.SetFont(font_bold)
self.program_information.SetFont(font_bold)
self.tp_flag.SetFont(font_normal)
self.ta_flag.SetFont(font_normal)
self.musicspeech_flag.SetFont(font_normal)
self.monostereo_flag.SetFont(font_normal)
self.artificialhead_flag.SetFont(font_normal)
self.compressed_flag.SetFont(font_normal)
self.staticpty_flag.SetFont(font_normal)
self.radiotext.SetFont(font_small)
self.clocktime.SetFont(font_small)
self.alt_freq.SetFont(font_small)
def __do_layout(self):
sizer_0 = wx.BoxSizer(wx.VERTICAL)
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
flag = wx.ALIGN_CENTER_VERTICAL|wx.LEFT
# arguments: window, proportion, flag, border
sizer_1.Add(self.label_1, 0, flag)
sizer_1.Add(self.frequency, 0, flag, 20)
sizer_1.Add(self.label_2, 0, flag, 20)
sizer_1.Add(self.station_name, 0, flag, 20)
sizer_1.Add(self.label_3, 0, flag, 20)
sizer_1.Add(self.program_type, 0, flag, 20)
sizer_1.Add(self.label_4, 0, flag, 20)
sizer_1.Add(self.program_information, 0, flag, 20)
sizer_0.Add(sizer_1, 1, wx.ALIGN_CENTER)
sizer_2.Add(self.tp_flag, 0, flag)
sizer_2.Add(self.ta_flag, 0, flag, 30)
sizer_2.Add(self.musicspeech_flag, 0, flag, 30)
sizer_2.Add(self.monostereo_flag, 0, flag, 30)
sizer_2.Add(self.artificialhead_flag, 0, flag, 30)
sizer_2.Add(self.compressed_flag, 0, flag, 30)
sizer_2.Add(self.staticpty_flag, 0, flag, 30)
sizer_0.Add(sizer_2, 1, wx.ALIGN_CENTER)
sizer_3.Add(self.label_6, 0, flag, 10)
self.clocktime.SetSizeHints(250, -1)
sizer_3.Add(self.clocktime, 0, flag, 10)
sizer_3.Add(self.label_7, 0, flag, 10)
self.alt_freq.SetSizeHints(200, -1)
sizer_3.Add(self.alt_freq, 0, flag, 10)
sizer_0.Add(sizer_3, 0, wx.ALIGN_CENTER)
sizer_4.Add(self.label_5, 0, flag)
sizer_4.Add(self.radiotext, 0, flag, 10)
sizer_0.Add(sizer_4, 0, wx.ALIGN_CENTER)
self.SetSizer(sizer_0)
def display_data(self, event):
msg_type = event.data[0]
msg = unicode(event.data[1], errors='replace')
if (msg_type==0): #program information
self.program_information.SetLabel(msg)
elif (msg_type==1): #station name
self.station_name.SetLabel(msg)
elif (msg_type==2): #program type
self.program_type.SetLabel(msg)
elif (msg_type==3): #flags
flags=msg
if (flags[0]=='1'):
self.tp_flag.SetForegroundColour(wx.RED)
else:
self.tp_flag.SetForegroundColour(wx.LIGHT_GREY)
if (flags[1]=='1'):
self.ta_flag.SetForegroundColour(wx.RED)
else:
self.ta_flag.SetForegroundColour(wx.LIGHT_GREY)
if (flags[2]=='1'):
self.musicspeech_flag.SetLabel("Music")
self.musicspeech_flag.SetForegroundColour(wx.RED)
else:
self.musicspeech_flag.SetLabel("Speech")
self.musicspeech_flag.SetForegroundColour(wx.RED)
if (flags[3]=='1'):
self.monostereo_flag.SetLabel("Mono")
self.monostereo_flag.SetForegroundColour(wx.RED)
else:
self.monostereo_flag.SetLabel("Stereo")
self.monostereo_flag.SetForegroundColour(wx.RED)
if (flags[4]=='1'):
self.artificialhead_flag.SetForegroundColour(wx.RED)
else:
self.artificialhead_flag.SetForegroundColour(wx.LIGHT_GREY)
if (flags[5]=='1'):
self.compressed_flag.SetForegroundColour(wx.RED)
else:
self.compressed_flag.SetForegroundColour(wx.LIGHT_GREY)
if (flags[6]=='1'):
self.staticpty_flag.SetForegroundColour(wx.RED)
else:
self.staticpty_flag.SetForegroundColour(wx.LIGHT_GREY)
elif (msg_type==4): #radiotext
self.radiotext.SetLabel(msg)
elif (msg_type==5): #clocktime
self.clocktime.SetLabel(msg)
elif (msg_type==6): #alternative frequencies
self.alt_freq.SetLabel(msg)
elif (msg_type==7): #update freq label
self.frequency.SetLabel(msg)
self.clear_data()
self.Layout()
def clear_data(self):
self.program_information.SetLabel("xxxx")
self.station_name.SetLabel("xxxxxxxx")
self.program_type.SetLabel("xxxxxxxxxxx")
self.ta_flag.SetForegroundColour(wx.BLACK)
self.tp_flag.SetForegroundColour(wx.BLACK)
self.musicspeech_flag.SetLabel("MUS/SPE")
self.musicspeech_flag.SetForegroundColour(wx.BLACK)
self.monostereo_flag.SetLabel("MN/ST")
self.monostereo_flag.SetForegroundColour(wx.BLACK)
self.artificialhead_flag.SetForegroundColour(wx.BLACK)
self.compressed_flag.SetForegroundColour(wx.BLACK)
self.staticpty_flag.SetForegroundColour(wx.BLACK)
self.radiotext.SetLabel("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
self.clocktime.SetLabel("xxxxxxxxxxxx")
self.alt_freq.SetLabel("xxxxxxxxxxxxxxxxx")
| gpl-2.0 | 5,637,691,268,546,287,000 | 33.717489 | 75 | 0.690519 | false |
terasaur/seedbank | src/seedbank/cli/create_command.py | 1 | 3141 | #
# Copyright 2012 ibiblio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#@PydevCodeAnalysisIgnore
import os
import libtorrent as lt
from seedbank.cli.command import Command
class CreateCommand(Command):
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
self._output_file = kwargs.get('output_file', None)
self._torrent_data = kwargs.get('torrent_data', None)
self._tracker = kwargs.get('tracker', None)
self._overwrite = bool(kwargs.get('overwrite', False))
self._show_progress = bool(kwargs.get('show_progress', False))
self._comment = kwargs.get('comment', None)
self._private = kwargs.get('private', False)
# TODO: look for tracker url(s) in self._config, CREATE_TORRENT_SECTION
def execute(self):
if not self._torrent_data:
raise ValueError('Missing file or directory path for torrent data')
if not self._output_file:
raise ValueError('Missing torrent output file')
if os.path.exists(self._output_file) and not self._overwrite:
self._println('Torrent file already exists')
return
input = os.path.abspath(self._torrent_data)
fs = lt.file_storage()
lt.add_files(fs, input)
if fs.num_files() == 0:
self._println('Error: no files added to torrent')
return
piece_length = self._calculate_piece_length()
#pad_size_limit = 4 * 1024 * 1024
pad_size_limit = -1
t = lt.create_torrent(fs, piece_length, pad_size_limit)
# TODO: support multiple tracker urls
if not self._tracker:
raise ValueError('Missing tracker URL')
t.add_tracker(self._tracker)
creator = 'terasaur seedbank %s' % lt.version
t.set_creator(creator)
if self._comment:
t.set_comment(self._comment)
t.set_priv(self._private)
data_dir = os.path.split(input)[0]
if self._show_progress:
lt.set_piece_hashes(t, data_dir, lambda x: self._print('.'))
self._println('')
else:
lt.set_piece_hashes(t, data_dir)
self._write_output_file(t, self._output_file)
def _calculate_piece_length(self):
# TODO: is the libtorrent algorithm a good long term solution?
# 32 K pieces
#return 32 * 1024
return 0
def _write_output_file(self, torrent, filepath):
self._println('Writing torrent file: %s' % (filepath))
data = lt.bencode(torrent.generate())
fp = open(filepath, 'wb')
fp.write(data)
fp.close()
| apache-2.0 | -1,144,621,993,123,599,500 | 34.292135 | 79 | 0.625597 | false |
museumvictoria/nodel-recipes | Group/script.py | 2 | 14096 | '''A node that groups members for control propagation and status monitoring - see script.py for notes'''
# For disappearing member support:
# (see readme, requires at least Nodel Host rev. 322 or later)
#
# - remote "Disappearing" signals should be wired to the actual signals
# - the usual remote signals should be wired to the respective "assumed" local signals respectively.
def main(arg=None):
for memberInfo in lookup_parameter('members') or []:
initMember(memberInfo)
console.info('Started!')
MODES = ['Action & Signal', 'Signal Only']
param_members = Parameter({'title': 'Members', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'name': {'title': 'Simple name (can assume context)', 'type': 'string', 'order': 1},
'isGroup': {'type': 'boolean', 'order': 2, 'title': 'Is a Group? (supports extended actions)'}, # this affects whether 'extended' remote arguments are to be used
'hasStatus': {'type': 'boolean', 'title': 'Provides Status?', 'order': 3},
'disappears': {'title': 'Disappears when Power Off? (usually on edge hosts)', 'type': 'boolean', 'order': 3.1},
'power': {'title': 'Power', 'type': 'object', 'order': 4, 'properties': {
'mode': {'title': 'Mode', 'type': 'string', 'enum': MODES}
}},
'muting': {'title': 'Muting', 'type': 'object', 'order': 5, 'properties': {
'mode': {'title': 'Mode', 'type': 'string', 'enum': MODES}
}}
}}}})
def initMember(memberInfo):
name = mustNotBeBlank('name', memberInfo['name'])
disappears = memberInfo.get('disappears')
isGroup = memberInfo.get('isGroup')
if (memberInfo.get('power') or {}).get('mode') in MODES:
initSignalSupport(name, memberInfo['power']['mode'], 'Power', ['On', 'Off'], disappears, isGroup)
if (memberInfo.get('muting') or {}).get('mode') in MODES:
initSignalSupport(name, memberInfo['muting']['mode'], 'Muting', ['On', 'Off'], disappears, isGroup)
# do status last because it depends on 'Power' when 'disappears' is in use
if memberInfo.get('hasStatus'):
initStatusSupport(name, disappears)
membersBySignal = {}
def initSignalSupport(name, mode, signalName, states, disappears, isGroup):
members = getMembersInfoOrRegister(signalName, name)
# establish local signals if haven't done so already
localDesiredSignal = lookup_local_event('Desired %s' % signalName)
if localDesiredSignal == None:
localDesiredSignal, localResultantSignal = initSignal(signalName, mode, states)
else:
localResultantSignal = lookup_local_event(signalName)
# establish a remote action
if mode == 'Action & Signal':
if not isGroup:
# for non-groups, just use simple remote actions
create_remote_action('Member %s %s' % (name, signalName), {'title': '"%s" %s' % (name, signalName), 'group': 'Members\' "%s"' % signalName, 'schema': {'type': 'string', 'enum': states}},
suggestedNode=name, suggestedAction=signalName)
else:
      # for group members, add a remote action to handle the 'propagation' flags
create_remote_action('Member %s %s Extended' % (name, signalName), {'title': '"%s" %s (extended)' % (name, signalName), 'group': 'Members (%s)' % signalName, 'schema': {'type': 'string', 'enum': states}},
suggestedNode=name, suggestedAction=signalName)
# establish a remote signal to receive status
# signal status states include 'Partially ...' forms
resultantStates = states + ['Partially %s' % s for s in states]
localMemberSignal = Event('Member %s %s' % (name, signalName), {'title': '"%s" %s' % (name, signalName), 'group': 'Members\' "%s"' % signalName, 'order': 9999+next_seq(), 'schema': {'type': 'string', 'enum': resultantStates}})
def aggregateMemberSignals():
shouldBeState = localDesiredSignal.getArg()
partially = False
for memberName in members:
if lookup_local_event('Member %s %s' % (memberName, signalName)).getArg() != shouldBeState:
partially = True
localResultantSignal.emit('Partially %s' % shouldBeState if partially else shouldBeState)
localMemberSignal.addEmitHandler(lambda arg: aggregateMemberSignals())
localDesiredSignal.addEmitHandler(lambda arg: aggregateMemberSignals())
def handleRemoteEvent(arg):
if arg == True or arg == 1:
arg = 'On'
elif arg == False or arg == 0:
arg = 'Off'
localMemberSignal.emit(arg)
create_remote_event('Member %s %s' % (name, signalName), handleRemoteEvent, {'title': '"%s" %s' % (name, signalName),'group': 'Members ("%s")' % signalName, 'order': next_seq(), 'schema': {'type': 'string', 'enum': resultantStates}},
suggestedNode=name, suggestedEvent=signalName)
if disappears:
prepareForDisappearingMemberSignal(name, signalName)
def initSignal(signalName, mode, states):
resultantStates = states + ['Partially %s' % s for s in states]
localDesiredSignal = Event('Desired %s' % signalName, {'group': '"%s"' % signalName, 'order': next_seq(), 'schema': {'type': 'string', 'enum': states}})
localResultantSignal = Event('%s' % signalName, {'group': '"%s"' % signalName, 'order': next_seq(), 'schema': {'type': 'string', 'enum': resultantStates}})
def handleComplexArg(complexArg):
state = complexArg['state']
noPropagate = complexArg.get('noPropagate')
localDesiredSignal.emit(state)
# for convenience, just emit the state as the status if no members are configured
if isEmpty(lookup_parameter('members')):
localResultantSignal.emit(state)
else:
if noPropagate:
return
for memberName in membersBySignal[signalName]:
remoteAction = lookup_remote_action('Member %s %s' % (memberName, signalName))
if remoteAction != None:
remoteAction.call(state)
remoteActionExtended = lookup_remote_action('Member %s %s Extended' % (memberName, signalName))
if remoteActionExtended != None:
remoteActionExtended.call(complexArg)
# create action
def handleSimpleOrComplexArg(arg):
if hasattr(arg, 'get'): # does it have the '.get' function i.e. dict/map-like
handleComplexArg(arg) # if so, assume it's complex and pass through
else:
handleComplexArg({'state': arg}) # else assume it's plain, wrap up in a complex arg
Action('%s' % signalName, handleSimpleOrComplexArg, {'group': '"%s"' % signalName, 'order': next_seq(), 'schema': {'type': 'string', 'enum': states}})
# create action with options (e.g. 'noPropagate')
Action('%s Extended' % signalName, handleComplexArg, {'group': '"%s" (extended)' % signalName, 'order': next_seq(), 'schema': {'type': 'object', 'properties': {
'state': {'type': 'string', 'enum': states, 'order': 3},
'noPropagate': {'type': 'boolean', 'order': 2}}}})
return localDesiredSignal, localResultantSignal
def getMembersInfoOrRegister(signalName, memberName):
members = membersBySignal.get(signalName)
if members == None:
members = list()
membersBySignal[signalName] = members
members.append(memberName)
return members
STATUS_SCHEMA = { 'type': 'object', 'properties': {
'level': { 'type': 'integer', 'order': 1 },
'message': {'type': 'string', 'order': 2 }
} }
EMPTY_SET = {}
def initStatusSupport(name, disappears):
  # look up the members structure (or register it if it doesn't exist yet)
members = getMembersInfoOrRegister('Status', name)
# check if this node has a status yet
selfStatusSignal = lookup_local_event('Status')
if selfStatusSignal == None:
selfStatusSignal = Event('Status', {'group': 'Status', 'order': next_seq(), 'schema': STATUS_SCHEMA})
# status for the member
memberStatusSignal = Event('Member %s Status' % name, {'title': '"%s" Status' % name, 'group': 'Members\' Status', 'order': 9999+next_seq(), 'schema': STATUS_SCHEMA})
# suppression flag?
memberStatusSuppressedSignal = Event('Member %s Status Suppressed' % name, {'title': 'Suppress "%s" Status' % name, 'group': 'Status Suppression', 'order': 9999+next_seq(), 'schema': {'type': 'boolean'}})
Action('Member %s Status Suppressed' % name, lambda arg: memberStatusSuppressedSignal.emit(arg), {'title': 'Suppress "%s" Status' % name, 'group': 'Status Suppression', 'order': 9999+next_seq(), 'schema': {'type': 'boolean'}})
def aggregateMemberStatus():
aggregateLevel = 0
aggregateMessage = 'OK'
    # for composing the aggregate message at the end
msgs = []
activeSuppression = False
for memberName in members:
suppressed = lookup_local_event('Member %s Status Suppressed' % memberName).getArg()
memberStatus = lookup_local_event('Member %s Status' % memberName).getArg() or EMPTY_SET
memberLevel = memberStatus.get('level')
if memberLevel == None: # as opposed to the value '0'
if suppressed:
activeSuppression = True
continue
memberLevel = 99
if memberLevel > aggregateLevel:
# raise the level (if not suppressed)
if suppressed:
activeSuppression = True
continue
aggregateLevel = memberLevel
memberMessage = memberStatus.get('message') or 'Has never been seen'
if memberLevel > 0:
if isBlank(memberMessage):
msgs.append(memberName)
else:
msgs.append('%s: [%s]' % (memberName, memberMessage))
if len(msgs) > 0:
aggregateMessage = ', '.join(msgs)
if activeSuppression:
aggregateMessage = '%s (*)' % aggregateMessage
selfStatusSignal.emit({'level': aggregateLevel, 'message': aggregateMessage})
memberStatusSignal.addEmitHandler(lambda arg: aggregateMemberStatus())
memberStatusSuppressedSignal.addEmitHandler(lambda arg: aggregateMemberStatus())
def handleRemoteEvent(arg):
memberStatusSignal.emit(arg)
create_remote_event('Member %s Status' % name, handleRemoteEvent, {'title': '"%s" Status' % name, 'group': 'Members (Status)', 'order': next_seq(), 'schema': STATUS_SCHEMA},
suggestedNode=name, suggestedEvent="Status")
if disappears:
prepareForDisappearingMemberStatus(name)
# members and status support ---!>
# <!--- disappearing members
# (for disappearing signals)
from org.nodel.core import BindingState
def prepareForDisappearingMemberStatus(name):
# lookup it's desired 'Power' signal
desiredPowerSignal = lookup_local_event('DesiredPower')
# create assumed status
assumedStatus = Event('Member %s Assumed Status' % name, { 'group': '(advanced)', 'order': next_seq(), 'schema': {'type': 'object', 'properties': {
'level': {'type': 'integer', 'order': 1},
'message': {'type': 'string', 'order': 2}}}})
# create volatile remote binding that just passes through the status anyway
disappearingRemoteStatus = create_remote_event('%s Disappearing Status' % name, lambda arg: assumedStatus.emit(arg))
# and when there's a wiring fault
def checkBindingState():
desiredPower = desiredPowerSignal.getArg()
wiringStatus = disappearingRemoteStatus.getStatus()
if desiredPower == 'On':
if wiringStatus != BindingState.Wired:
assumedStatus.emit({'level': 2, 'message': 'Power is supposed to be On - no confirmation of that.'})
else:
# wiringStatus is 'Wired', normal status can be passed through
remoteStatusArg = disappearingRemoteStatus.getArg()
if remoteStatusArg != None:
assumedStatus.emit(remoteStatusArg)
elif desiredPower == 'Off':
if wiringStatus == BindingState.Wired:
assumedStatus.emit({'level': 1, 'message': 'Power should be Off but appears to be alive'})
else:
# wiringStatus is not 'Wired'
assumedStatus.emit({'level': 0, 'message': 'OK'})
# check when the status binding state changes
disappearingRemoteStatus.addBindingStateHandler(lambda arg: checkBindingState())
# and when the power state changes
desiredPowerSignal.addEmitHandler(lambda arg: checkBindingState())
def prepareForDisappearingMemberSignal(name, signalName):
# lookup it's desired 'Power' signal
desiredPowerSignal = lookup_local_event('DesiredPower')
# create assumed signal
assumedSignal = Event('Member %s Assumed %s' % (name, signalName), { 'group': '(advanced)', 'order': next_seq(), 'schema': {'type': 'string'}})
# create volatile remote binding that just passes through the status anyway
disappearingRemoteSignal = create_remote_event('%s Disappearing %s' % (name, signalName), lambda arg: assumedSignal.emit(arg))
# and when there's a wiring fault
def checkBindingState():
desiredPower = desiredPowerSignal.getArg()
wiringStatus = disappearingRemoteSignal.getStatus()
if wiringStatus == BindingState.Wired:
# pass on the remote signal
remoteSignalArg = disappearingRemoteSignal.getArg()
if remoteSignalArg != None:
assumedSignal.emit(remoteSignalArg)
else:
# wiring status is NOT wired
if desiredPower == 'On':
assumedSignal.emit('Partially On')
elif desiredPower == 'Off':
assumedSignal.emit('Off')
# check when the status binding state changes
disappearingRemoteSignal.addBindingStateHandler(lambda arg: checkBindingState())
# and when the power state changes
desiredPowerSignal.addEmitHandler(lambda arg: checkBindingState())
# disappearing members ---!>
# <!--- convenience functions
def mustNotBeBlank(name, s):
if isBlank(s):
    raise Exception('%s cannot be blank' % name)
return s
def isBlank(s):
if s == None or len(s) == 0 or len(s.strip()) == 0:
return True
def isEmpty(o):
if o == None or len(o) == 0:
return True
# convenience functions ---!>
| mit | 1,293,657,446,865,031,400 | 39.739884 | 235 | 0.647134 | false |
NoahFlowa/glowing-spoon | venv/lib/python2.7/site-packages/pip/commands/hash.py | 514 | 1597 | from __future__ import absolute_import
import hashlib
import logging
import sys
from pip.basecommand import Command
from pip.status_codes import ERROR
from pip.utils import read_chunks
from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES
logger = logging.getLogger(__name__)
class HashCommand(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
"""
name = 'hash'
usage = '%prog [options] <file> ...'
summary = 'Compute hashes of package archives.'
def __init__(self, *args, **kw):
super(HashCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-a', '--algorithm',
dest='algorithm',
choices=STRONG_HASHES,
action='store',
default=FAVORITE_HASH,
help='The hash algorithm to use: one of %s' %
', '.join(STRONG_HASHES))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
logger.info('%s:\n--hash=%s:%s',
path, algorithm, _hash_of_file(path, algorithm))
def _hash_of_file(path, algorithm):
"""Return the hash digest of a file."""
with open(path, 'rb') as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
| apache-2.0 | 9,102,590,725,559,541,000 | 27.017544 | 73 | 0.599875 | false |
colloquium/spacewalk | client/tools/rhncfg/config_management/rhncfg-manager.py | 1 | 1362 | #!/usr/bin/python
#
# Copyright (c) 2008 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
RHNROOT = '/usr/share/rhn'
import sys
if RHNROOT not in sys.path:
sys.path.append(RHNROOT)
from config_common.rhn_main import BaseMain
class Main(BaseMain):
modes = [
'add',
'create-channel',
'diff',
'diff-revisions',
'download-channel',
'get',
'list',
'list-channels',
'remove',
'remove-channel',
'revisions',
'update',
'upload-channel',
]
plugins_dir = 'config_management'
config_section = 'rhncfg-manager'
mode_prefix = 'rhncfg'
if __name__ == '__main__':
try:
sys.exit(Main().main() or 0)
except KeyboardInterrupt:
sys.stderr.write("user interrupted\n")
sys.exit(0)
| gpl-2.0 | -458,780,216,665,991,900 | 26.24 | 73 | 0.643906 | false |
rpspringuel/cluster | stats.py | 1 | 24321 | """Cluster statistics which are method independent.
Routines which calculate statistics for data and clustering results.
Statistics in this package do not care what the particular clustering
methodology is.
A complete version history and licence and copyright information are located
in the source code.
"""
import numpy
from . import distances
import scipy
from . import _support
import warnings
def distancematrix(data,weights=None,dist='e',verbose=False):
"""Computes the distance matrix for a given data set.
The distance matrix indicates the distance between each data point in the
    data set. Each entry is a distance, with its indices indicating which
    data points are separated by that distance, much like the distance chart
    you might find on a map that lists the distance between cities. Thus the
    matrix is symmetric and all diagonal elements are 0. This function reduces
    needed memory by only storing the lower half matrix, excluding the main
    diagonal.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
distancematrix(numpy.transpose(data),...).
Parameters:
data : ndarray
Rank 2 array. Each row is assumed to represent a single data point.
weights : ndarray
Optional. If given, a rank 1 array with length equal to the number
of columns in data. Entries specify the weight for each dimension
in the distance function.
dist : string
            Specifies the desired distance function by its alias.
verbose : boolean
            If true, periodic updates on progress will be printed to the screen.
Returns:
dm : list of ndarray
            Returns only the lower left half of the matrix, not including the
            main diagonal. Upper right half elements are a mirror of the lower
            left half and main diagonal elements are all zero. Thus, each row
has one more element than the one above, and the first row has no
elements.
See Also:
distances.distance
"""
dm = []
if verbose:
N = len(data)*(len(data)-1)/2
current = 0
n = 0
for i in range(len(data)):
row = []
for j in range(i):
row.append(distances.distance(data[i],data[j],weights,dist))
if verbose:
n += 1
if n*100/N > current:
current = n*100/N
print('%i%% complete' % current)
dm.append(numpy.array(row))
return dm
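# A minimal usage sketch with hypothetical data (default distance alias): the
# lower-triangular layout means row i holds the distances to the i points
# before it, so the distance between points i and j (with i > j) is dm[i][j].
#     data = numpy.array([[0., 0.], [3., 4.], [6., 8.]])
#     dm = distancematrix(data)
#     d_21 = dm[2][1]      # distance between points 2 and 1
#     len(dm[0]) == 0      # first row is empty; row i has exactly i entries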
def fulldistancematrix(data,weights=None,dist='e',verbose=False):
"""Computes the distance matrix for a given data set.
    Same as distancematrix but returns the full distance matrix. Requires more
memory as a result, but can be easier to work with.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
fulldistancematrix(numpy.transpose(data),...).
Parameters:
data : ndarray
Rank 2 array. Each row is assumed to represent a single data point.
weights : ndarray
Optional. If given, a rank 1 array with length equal to the number
of columns in data. Entries specify the weight for each dimension
in the distance function.
dist : string
            Specifies the desired distance function by its alias.
verbose : boolean
            If true, periodic updates on progress will be printed to the screen.
Returns:
dm : ndarray
            Returns the distance matrix for the data. This matrix is symmetric
            with all main diagonal elements equal to 0.
See Also:
distancematrix
"""
dm = numpy.zeros((len(data),len(data)))
if verbose:
N = len(data)*(len(data)-1)/2
current = 0
n = 0
for i in range(len(data)):
for j in range(i):
dm[j][i] = dm[i][j] = distances.distance(data[i],data[j],weights,dist)
if verbose:
n += 1
if n*100/N > current:
current = n*100/N
print('%i%% complete' % current)
return dm
def silhouette(point,levs,dm=None,data=None,weight=None,dist='e'):
"""Variation on the silhouette coefficient that works for fuzzy clustering.
The fuzzy silhouette coefficient is calculated by the following algorithm:
1. Convert the distances from the point to similarities.
2. Calculate the average similarity between the point and each cluster,
weighting by the probability of each point being in that cluster. Call the
set of these values s.
3. Multiply the values in s by the probability that the point is in that
cluster. Call these values a.
4. Multiply the values in s by the probability that the point is in each
    cluster. For each cluster choose the maximum such value that corresponds to
a different cluster. Call these values b.
5. The silhouette coefficients for each cluster is defined as
(a-b)/(1-min(a,b)) for the a and b value which corresponds to that cluster.
Note that in cases where the levs array represents an exclusive clustering
(i.e. each row has only one non-zero entry and that entry is 1) the
silhouette coefficients for clusters which the point is not a member of
should be 0 while for the cluster which the point is a member of should be
the normal silhouette coefficient. As a result, for exclusive clustering
solutions: sum(silhouette(...)) = old_silhouette(...).
    Since the probability of two points being in the same cluster is high
    when the product of the appropriate levs entries is high, but their
    similarity is high when the distance between them is low, we have to
    convert distances to similarities to avoid messing up the max function
    built into the
definition of the silhouette coefficient. We do this by assuming that
s = 1 - d. However, we are still computing the distance version of the
silhouette coefficient. As a result, it is essential that normalized
distances be used.
The silhouette coefficient was originally defined in P. J. Rousseeuw,
Silhouettes: a graphical aid to the interpretation and validation of cluster
    analysis, Journal of Computational and Applied Mathematics 20, 53 (1987),
URL http://dx.doi.org/10.1016/0377-0427(87)90125-7. The generalization here
to fuzzy clustering was made by R. P. Springuel and is unpublished.
Parameters:
point : integer
Indicates the row index within data for the point the
            silhouette coefficient is being calculated for.
levs : ndarray
            Rank 2 array containing entries indicating the membership level of
each point in each cluster. levs[i][j] is the level to which the
ith data point belongs to the jth cluster.
dm : list of ndarrays or ndarray
Optional. The distance matrix for the data (i.e. the results of a
distancematrix or fulldistancematrix call). If not provided data is
required.
data : ndarray
Optional. The data set. Not required if dm is provided.
weight : ndarray
Optional. If given, a rank 1 array with length equal to the number
of columns in data. Entries specify the weight for each dimension
in the distance function. Not required if dm is provided.
dist : string
Optional. Specifies the distance function to use. Not required if
dm is provided.
Returns:
sil : float
The silhouette coefficient for the given point in the data set.
"""
if dm is None:
d = []
for i in range(len(data)):
d.append(distances.distance(data[point],data[i],weight,dist))
d = numpy.array(d)
elif type(dm) is list:
d = []
for i in range(len(dm)):
if i == point:
d.append(0)
else:
d.append(dm[max([i,point])][min([i,point])])
d = numpy.array(d)
else:
d = dm[point]
s = 1 - d
lev = levs[point].copy()
levs[point] = 0
s = numpy.sum(levs*s.reshape(len(s),1),axis=0)/numpy.sum(levs,axis=0)
s = numpy.outer(lev,s)
a = numpy.diagonal(s)
s = s - numpy.identity(len(s)) * numpy.diagonal(s)
b = s.max(axis=1)
    sil = (a-b)/(1-numpy.array([a,b]).min(axis=0))
levs[point] = lev
return sil
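# Illustrative usage sketch (not part of the original module): per-cluster
# silhouette values for one point of a small, hypothetical exclusive
# clustering.  The data are kept in [0, 1] so the distances are normalized,
# as the docstring above requires.
def _example_silhouette():
    sample = numpy.array([[0.0], [0.1], [0.9], [1.0]])
    levs = numpy.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])
    dm = fulldistancematrix(sample)
    return silhouette(0, levs, dm=dm)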
def levscheck(levs,percision=15):
"""Check to see if a levs array is legal.
    Checks to make sure that the full list of weights for each data point in each
cluster has no values greater than 1 or less than 0, is properly normalized,
and that each cluster has at least one member but doesn't contain all data
points with weight 1.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
levscheck(numpy.transpose(levs),...).
Parameters:
levs : ndarray
            Rank 2 array containing entries indicating the membership level of
each point in each cluster. levs[i][j] is the level to which the
ith data point belongs to the jth cluster.
percision : integer
Number of decimal digits for the normalization test.
            It was found during testing that excessive precision in levs could
lead to a false normalization error. I.e. even when the elements of
levs were defined by d/numpy.sum(d) for a particular data point,
this function would say that that data point was not normalized
under some circumstances. To alleviate this problem, a data point
is considered normalized when the sum of its levs values round to 1
at percision decimal places.
Returns:
result : boolean
True if levs is legal, False if it isn't
normal : list of integers
List of data points identified by their index in levs which do not
have normalized weights.
empty : list of integers
List of clusters identified by their index in levs which do not
have any members.
full : list of integers
List of clusters identified by their index in levs which contain
all data points with weight 1.
"""
test1 = numpy.round(numpy.sum(levs,axis=1),percision) == 1
test2 = numpy.sum(levs,axis=0)
test3 = numpy.all(0 <= levs) and numpy.all(levs <= 1)
normal = []
empty = []
full = []
if not all(test1):
for i in range(len(test1)):
if not test1[i]:
normal.append(i)
if not all(test2 > 0):
for i in range(len(test2)):
if test2[i] <= 0:
empty.append(i)
if not all(test2 < len(test1)):
for i in range(len(test2)):
if test2[i] >= len(test1):
full.append(i)
result = test3 and (len(normal) == 0) and (len(empty) == 0) and (len(full) == 0)
return result,normal,empty,full
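# Illustrative usage sketch (not part of the original module): a hypothetical
# membership array for three points and two clusters.  Each row sums to 1 and
# neither cluster is empty or over-full, so the check should report it legal.
def _example_levscheck():
    levs = numpy.array([[0.7, 0.3], [0.2, 0.8], [1.0, 0.0]])
    return levscheck(levs)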
def singleclustercentroid(data,lev,p=1.,method='a',weights=None,distancematrix=None):
"""Calculates the centroid of a cluster.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
singleclustercentroid(numpy.transpose(data),...).
Parameters:
data : ndarray
Rank 2 array containing the data set.
lev : ndarray
Rank 1 array that contains the list of weights that specify how
important a particular point is in determining the location of the
centroid. 0 means that the data point should have no impact, while
1 means that it should have maximal impact.
p : float
Determines the influence of the weights. Should be between 1 and
infinity. Values closer to 1 yield more distinct centroids.
Larger values lead to centroids which approach the global centroid.
Should always be 1 for exclusive clustering.
method : string
Specifies the method to be used for amalgamating the various
observations into a single value. Supported methods are:
a - arithmetic mean (default)
m - median
s - absolute mean
g - geometric mean
h - harmonic mean
q - quadratic mean
d - mode (results may be multivalued along some dimensions)
o* - medoid (point with smallest average distance to other points)
When specifying a medoid method the wildcard (*) should be one of
the available distance functions and weights should be given (if
appropriate). If distancematrix is given, then 'o' should be given
by itself (the wildcard will be ignored).
weights : ndarray
Optional. If given, a rank 1 array with length equal to the number
of rows in data. Entries specify the weight for each dimension in
the distance function. Only needed if a medoid method is being
used and dimensions are not equally weighted.
distancematrix : ndarray
Optional. Used to save time when calculating centroids using a
medoid (o) method. Passing distancematrix prevents this function
from calculating it and thus this option is mostly useful when
running several functions that require knowledge of the distance
matrix and would otherwise have to calculate it themselves.
Returns:
centroid : ndarray
Rank 1 array containing the centroid. If method = 'd' then dtype
for this array is object. Otherwise it is the same as dtype of
data or float, whichever is of higher order.
Notes:
Because this function only deals with one cluster (i.e. only one
row/column of the full levs matrix) it has no way of knowing if the
        values in lev have been properly normalized (i.e. assigned so that a
        particular data point has a total weight of 1 when summed over all
        clusters).
See Also:
distances.distance
"""
lev = lev[:,numpy.newaxis]
if method == 'a':
centroid = _support.mean(data,lev**p,axis=0,NN=False)
elif method == 'm':
centroid = _support.median(data,lev**p,axis=0,NN=False)
elif method == 's':
centroid = _support.absmean(data,lev**p,axis=0,NN=False)
elif method == 'g':
centroid = _support.geomean(data,lev**p,axis=0,NN=False)
elif method == 'h':
centroid = _support.harmean(data,lev**p,axis=0,NN=False)
elif method == 'q':
centroid = _support.quadmean(data,lev**p,axis=0,NN=False)
elif method == 'd':
centroid = _support.mode(data,lev**p,axis=0,NN=False)
elif method[0] == 'o':
if distancematrix is None:
d = fulldistancematrix(data,weights,method[1:])
else:
d = distancematrix
d = list(_support.mean(d,lev**p,axis=0,NN=False))
i = d.index(min(d))
centroid = data[i]
else:
raise ValueError('Method type unsupported.')
return centroid
def clustercentroids(data,levs,p=1.,method='a',weights=None,distancematrix=None):
"""Calculates the centroid of all clusters.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
numpy.transpose(clustercentroids(numpy.transpose(data),...).
Parameters:
data : ndarray
Rank 2 array containing the data set.
levs : ndarray
Rank 2 array indicating the membership level of each data point in
each cluster. levs[i][j] is the level to which the ith data point
belongs to the jth cluster.
p : float
Determines the influence of the weights. Should be between 1 and
infinity. Values closer to 1 yield more distinct centroids.
Larger values lead to centroids which approach the global centroid.
Should always be 1 for exclusive clustering.
method : character
Specifies the method used to find the centroid. See
singleclustercentroid for options.
weights : ndarray
Optional. If given, a rank 1 array with length equal to the number
of columns in data. Entries specify the weight for each dimension
in the distance function. Only needed if a medoid method is being
used and dimensions are not equally weighted.
distancematrix : ndarray
Optional. Used to save time when calculating centroids using a
medoid (o) method. Passing distancematrix prevents this function
from calculating it and thus this option is mostly useful when
running several functions that require knowledge of the distance
matrix and would otherwise have to calculate it themselves.
Returns:
cdata : ndarray or list of list of ndarray and ndarray
Rank 2 array containing the centroids. Each row is a centroid.
See Also:
singleclustercentroid
"""
cdata = []
check = levscheck(levs)
if not check[0]:
if len(check[1]) > 0:
warnings.warn('levs is not properly normalized.',UserWarning,stacklevel=2)
if len(check[2]) > 0:
warnings.warn('levs has empty clusters.',UserWarning,stacklevel=2)
if len(check[3]) > 0:
warnings.warn('levs has overfull clusters.',UserWarning,stacklevel=2)
if distancematrix is None and method[0] == 'o':
distancematrix = fulldistancematrix(data,weights,method[1:])
for i in range(len(levs[0])):
cdata.append(singleclustercentroid(data,levs[:,i],p,method,weights,distancematrix))
cdata = numpy.array(cdata)
return cdata
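# Illustrative usage sketch (not part of the original module): arithmetic-mean
# centroids for a hypothetical exclusive two-cluster solution.
def _example_clustercentroids():
    sample = numpy.array([[0., 0.], [0., 2.], [10., 10.], [10., 12.]])
    levs = numpy.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])
    return clustercentroids(sample, levs, method='a')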
def SEmatrix(data,levs,p=1.,method='a',dist='e',weights=None,cdata=None,distancematrix=None,link='m'):
"""Calculates the squared error matrix by point and cluster.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
numpy.transpose(SEmatrix(numpy.transpose(data),...)).
Parameters:
data : ndarray
Rank 2 array containing the data set.
levs : ndarray
Rank 2 array indicating the membership level of each data point in
each cluster. levs[i][j] is the level to which the ith data point
belongs to the jth cluster.
p : float
Determines the influence of the weights. Should be between 1 and
infinity. Values closer to 1 yield more distinct centroids.
Larger values lead to centroids which approach the global centroid.
Should always be 1 for exclusive clustering.
method : character
Specifies the method used to find the centroid. See
singleclustercentroid for options. Not required if cdata is given.
dist : string
Specifies the distance function to use.
weights : ndarray
Optional. If given, a rank 1 array with length equal to the number
of rows in data. Entries specify the weight for each dimension in
the distance function. Only needed if a medoid method is being
used, dimensions are not equally weighted, and cdata is not given.
cdata : ndarray
Rank 2 array containing the centroids.
distancematrix : ndarray
The distance matrix for the data (i.e. the results of a
fulldistancematrix call). Only used to speed up the call of
clustercentroid when cdata is not given.
link : string
In cases where cdata contains at least one multi-modal centroid,
link is used to specify how the distance between each point and the
multi-modal centroid(s) is found. Possible options are:
m - maximum link (largest pair-wise distance, default)
s - single link (smallest pair-wise distance)
a - average link (average pair-wise distance)
Returns:
sse : ndarray
            Rank 2 array containing the contribution to the sse for each
            point/cluster pair. Row indices correspond to points
            and column indices to clusters.
"""
if cdata is None:
cdata = clustercentroids(data,levs,p,method,weights,distancematrix)
sse = numpy.zeros((len(data),len(levs[0])),dtype=float)
for i in range(len(data)):
if cdata.dtype.type is numpy.object_:
for j in range(len(cdata)):
k = list(map(len,cdata[j]))
index = numpy.zeros_like(k)
d = numpy.zeros(numpy.prod(k))
for n in range(len(d)):
cent = numpy.zeros(len(index))
for m in range(len(index)):
cent[m] = cdata[j,m][index[m]]
d[n] = distances.distance(data[i],cent,dist=dist)
index[0] += 1
for m in range(len(index)-1):
if index[m] == k[m]:
index[m] = 0
index[m+1] += 1
if link == 'm':
sse[i][j] += levs[i][j]**p*(d.max())**2
elif link == 's':
sse[i][j] += levs[i][j]**p*(d.min())**2
elif link == 'a':
sse[i][j] += levs[i][j]**p*(numpy.mean(d))**2
else:
raise ValueError('Link type not supported.')
else:
for j in range(len(cdata)):
sse[i][j] += levs[i][j]**p*(distances.distance(data[i],cdata[j],dist=dist))**2
return sse
def levscompare(levs1,levs2,rtol=1.0000000000000001e-005,atol=1e-008):
"""Compares two levs arrays to see if they are equivalent.
Since there should be no preference for which cluster comes "first" in a
levs array, it is possible for levs1 == levs2 or
numpy.allclose(levs1,levs2) to return False even when both levs arrays
correspond to the same clustering solution. However, given that the data
point order is unchanged, then the two levs arrays should only differ by
column swaps. Knowing this, this function compares the two levs arrays to
see if they are the same.
While the transpose parameter has been removed, the behavior formerly
obtained by setting transpose to True can be duplicated by the command
levscompare(numpy.transpose(levs1),numpy.transpose(levs2),...).
Parameters:
levs1, levs2 : ndarray
Rank 2 array indicating the membership level of each data point in
each cluster. Each is usually the result of a kmeans or cmeans run
with random starting conditions.
rtol : float
The allowable relative error in levs between elements in levs1 and
levs2.
atol : float
The allowable absolute error in levs between elements in levs1 and
levs2. levs1[:,i] is considered equal to levs2[:,j] when
            abs(levs1[:,i]-levs2[:,j]) <= atol + rtol*abs(levs2[:,j])
Returns:
equiv : boolean
True if levs1 and levs2 correspond to the same clustering solution.
"""
if levs1.shape != levs2.shape:
equiv = False
else:
matches = []
for i in numpy.transpose(levs1):
for j in range(len(levs2[0])):
if numpy.allclose(i,levs2[:,j],rtol,atol) and j not in matches:
matches.append(j)
break
matches.sort()
equiv = matches == list(range(len(levs2[0])))
return equiv
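# Illustrative usage sketch (not part of the original module): two membership
# arrays that describe the same partition with their cluster columns swapped
# should compare as equivalent.
def _example_levscompare():
    levs1 = numpy.array([[1., 0.], [0., 1.], [0., 1.]])
    levs2 = levs1[:, ::-1]
    return levscompare(levs1, levs2)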
| bsd-3-clause | -4,286,703,930,995,231,000 | 44.975425 | 102 | 0.629867 | false |
googleapis/python-dialogflow | google/cloud/dialogflow_v2beta1/types/document.py | 1 | 16280 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflow_v2beta1.types import gcs
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.v2beta1",
manifest={
"Document",
"GetDocumentRequest",
"ListDocumentsRequest",
"ListDocumentsResponse",
"CreateDocumentRequest",
"ImportDocumentsRequest",
"ImportDocumentTemplate",
"ImportDocumentsResponse",
"DeleteDocumentRequest",
"UpdateDocumentRequest",
"KnowledgeOperationMetadata",
"ReloadDocumentRequest",
},
)
class Document(proto.Message):
r"""A knowledge document to be used by a
[KnowledgeBase][google.cloud.dialogflow.v2beta1.KnowledgeBase].
For more information, see the `knowledge base
guide <https://cloud.google.com/dialogflow/docs/how/knowledge-bases>`__.
Note: The ``projects.agent.knowledgeBases.documents`` resource is
deprecated; only use ``projects.knowledgeBases.documents``.
Attributes:
name (str):
Optional. The document resource name. The name must be empty
when creating a document. Format:
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>``.
display_name (str):
Required. The display name of the document.
The name must be 1024 bytes or less; otherwise,
the creation request fails.
mime_type (str):
Required. The MIME type of this document.
knowledge_types (Sequence[google.cloud.dialogflow_v2beta1.types.Document.KnowledgeType]):
Required. The knowledge type of document
content.
content_uri (str):
The URI where the file content is located.
For documents stored in Google Cloud Storage, these URIs
must have the form ``gs://<bucket-name>/<object-name>``.
NOTE: External URLs must correspond to public webpages,
i.e., they must be indexed by Google Search. In particular,
URLs for showing documents in Google Cloud Storage (i.e. the
URL in your browser) are not supported. Instead use the
``gs://`` format URI described above.
content (str):
The raw content of the document. This field is only
permitted for EXTRACTIVE_QA and FAQ knowledge types. Note:
This field is in the process of being deprecated, please use
raw_content instead.
raw_content (bytes):
The raw content of the document. This field is only
permitted for EXTRACTIVE_QA and FAQ knowledge types.
enable_auto_reload (bool):
Optional. If true, we try to automatically reload the
document every day (at a time picked by the system). If
false or unspecified, we don't try to automatically reload
the document.
Currently you can only enable automatic reload for documents
sourced from a public url, see ``source`` field for the
source types.
Reload status can be tracked in ``latest_reload_status``. If
a reload fails, we will keep the document unchanged.
If a reload fails with internal errors, the system will try
to reload the document on the next day. If a reload fails
            with non-retriable errors (e.g. PERMISSION_DENIED), the
system will not try to reload the document anymore. You need
to manually reload the document successfully by calling
``ReloadDocument`` and clear the errors.
latest_reload_status (google.cloud.dialogflow_v2beta1.types.Document.ReloadStatus):
Output only. The time and status of the
latest reload. This reload may have been
triggered automatically or manually and may not
have succeeded.
metadata (Sequence[google.cloud.dialogflow_v2beta1.types.Document.MetadataEntry]):
Optional. Metadata for the document. The metadata supports
arbitrary key-value pairs. Suggested use cases include
storing a document's title, an external URL distinct from
the document's content_uri, etc. The max size of a ``key``
or a ``value`` of the metadata is 1024 bytes.
"""
class KnowledgeType(proto.Enum):
r"""The knowledge type of document content."""
KNOWLEDGE_TYPE_UNSPECIFIED = 0
FAQ = 1
EXTRACTIVE_QA = 2
ARTICLE_SUGGESTION = 3
SMART_REPLY = 4
class ReloadStatus(proto.Message):
r"""The status of a reload attempt.
Attributes:
time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time of a reload attempt.
This reload may have been triggered
automatically or manually and may not have
succeeded.
status (google.rpc.status_pb2.Status):
Output only. The status of a reload attempt
or the initial load.
"""
time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
status = proto.Field(proto.MESSAGE, number=2, message=status_pb2.Status,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
mime_type = proto.Field(proto.STRING, number=3,)
knowledge_types = proto.RepeatedField(proto.ENUM, number=4, enum=KnowledgeType,)
content_uri = proto.Field(proto.STRING, number=5, oneof="source",)
content = proto.Field(proto.STRING, number=6, oneof="source",)
raw_content = proto.Field(proto.BYTES, number=9, oneof="source",)
enable_auto_reload = proto.Field(proto.BOOL, number=11,)
latest_reload_status = proto.Field(proto.MESSAGE, number=12, message=ReloadStatus,)
metadata = proto.MapField(proto.STRING, proto.STRING, number=7,)
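def _example_document():
    """Illustrative sketch, not part of the generated file.
    proto-plus messages accept keyword arguments, so an FAQ knowledge
    document could be described roughly as below; the display name and URI
    are placeholders.
    """
    return Document(
        display_name="Example FAQ",
        mime_type="text/html",
        knowledge_types=[Document.KnowledgeType.FAQ],
        content_uri="https://cloud.google.com/dialogflow/docs/",
    )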
class GetDocumentRequest(proto.Message):
r"""Request message for
[Documents.GetDocument][google.cloud.dialogflow.v2beta1.Documents.GetDocument].
Attributes:
name (str):
Required. The name of the document to retrieve. Format
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>``.
"""
name = proto.Field(proto.STRING, number=1,)
class ListDocumentsRequest(proto.Message):
r"""Request message for
[Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments].
Attributes:
parent (str):
Required. The knowledge base to list all documents for.
Format:
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 10 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
filter (str):
The filter expression used to filter documents returned by
the list method. The expression has the following syntax:
            <field> <operator> <value> [AND <field> <operator> <value>] ...
The following fields and operators are supported:
- knowledge_types with has(:) operator
- display_name with has(:) operator
- state with equals(=) operator
Examples:
- "knowledge_types:FAQ" matches documents with FAQ
knowledge type.
- "display_name:customer" matches documents whose display
name contains "customer".
- "state=ACTIVE" matches documents with ACTIVE state.
- "knowledge_types:FAQ AND state=ACTIVE" matches all active
FAQ documents.
For more information about filtering, see `API
Filtering <https://aip.dev/160>`__.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
filter = proto.Field(proto.STRING, number=4,)
class ListDocumentsResponse(proto.Message):
r"""Response message for
[Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments].
Attributes:
documents (Sequence[google.cloud.dialogflow_v2beta1.types.Document]):
The list of documents.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
documents = proto.RepeatedField(proto.MESSAGE, number=1, message="Document",)
next_page_token = proto.Field(proto.STRING, number=2,)
class CreateDocumentRequest(proto.Message):
r"""Request message for
[Documents.CreateDocument][google.cloud.dialogflow.v2beta1.Documents.CreateDocument].
Attributes:
parent (str):
Required. The knowledge base to create a document for.
Format:
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``.
document (google.cloud.dialogflow_v2beta1.types.Document):
Required. The document to create.
import_gcs_custom_metadata (bool):
Whether to import custom metadata from Google
Cloud Storage. Only valid when the document
source is Google Cloud Storage URI.
"""
parent = proto.Field(proto.STRING, number=1,)
document = proto.Field(proto.MESSAGE, number=2, message="Document",)
import_gcs_custom_metadata = proto.Field(proto.BOOL, number=3,)
class ImportDocumentsRequest(proto.Message):
r"""Request message for
[Documents.ImportDocuments][google.cloud.dialogflow.v2beta1.Documents.ImportDocuments].
Attributes:
parent (str):
Required. The knowledge base to import documents into.
Format:
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``.
gcs_source (google.cloud.dialogflow_v2beta1.types.GcsSources):
The Google Cloud Storage location for the documents. The
path can include a wildcard.
These URIs may have the forms
``gs://<bucket-name>/<object-name>``.
``gs://<bucket-name>/<object-path>/*.<extension>``.
document_template (google.cloud.dialogflow_v2beta1.types.ImportDocumentTemplate):
Required. Document template used for
importing all the documents.
import_gcs_custom_metadata (bool):
Whether to import custom metadata from Google
Cloud Storage. Only valid when the document
source is Google Cloud Storage URI.
"""
parent = proto.Field(proto.STRING, number=1,)
gcs_source = proto.Field(
proto.MESSAGE, number=2, oneof="source", message=gcs.GcsSources,
)
document_template = proto.Field(
proto.MESSAGE, number=3, message="ImportDocumentTemplate",
)
import_gcs_custom_metadata = proto.Field(proto.BOOL, number=4,)
class ImportDocumentTemplate(proto.Message):
r"""The template used for importing documents.
Attributes:
mime_type (str):
Required. The MIME type of the document.
knowledge_types (Sequence[google.cloud.dialogflow_v2beta1.types.Document.KnowledgeType]):
Required. The knowledge type of document
content.
metadata (Sequence[google.cloud.dialogflow_v2beta1.types.ImportDocumentTemplate.MetadataEntry]):
Metadata for the document. The metadata supports arbitrary
key-value pairs. Suggested use cases include storing a
document's title, an external URL distinct from the
document's content_uri, etc. The max size of a ``key`` or a
``value`` of the metadata is 1024 bytes.
"""
mime_type = proto.Field(proto.STRING, number=1,)
knowledge_types = proto.RepeatedField(
proto.ENUM, number=2, enum="Document.KnowledgeType",
)
metadata = proto.MapField(proto.STRING, proto.STRING, number=3,)
class ImportDocumentsResponse(proto.Message):
r"""Response message for
[Documents.ImportDocuments][google.cloud.dialogflow.v2beta1.Documents.ImportDocuments].
Attributes:
warnings (Sequence[google.rpc.status_pb2.Status]):
Includes details about skipped documents or
any other warnings.
"""
warnings = proto.RepeatedField(proto.MESSAGE, number=1, message=status_pb2.Status,)
class DeleteDocumentRequest(proto.Message):
r"""Request message for
[Documents.DeleteDocument][google.cloud.dialogflow.v2beta1.Documents.DeleteDocument].
Attributes:
name (str):
Required. The name of the document to delete. Format:
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>``.
"""
name = proto.Field(proto.STRING, number=1,)
class UpdateDocumentRequest(proto.Message):
r"""Request message for
[Documents.UpdateDocument][google.cloud.dialogflow.v2beta1.Documents.UpdateDocument].
Attributes:
document (google.cloud.dialogflow_v2beta1.types.Document):
Required. The document to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Not specified means ``update all``. Currently,
only ``display_name`` can be updated, an InvalidArgument
will be returned for attempting to update other fields.
"""
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class KnowledgeOperationMetadata(proto.Message):
r"""Metadata in google::longrunning::Operation for Knowledge
operations.
Attributes:
state (google.cloud.dialogflow_v2beta1.types.KnowledgeOperationMetadata.State):
Required. Output only. The current state of
this operation.
"""
class State(proto.Enum):
r"""States of the operation."""
STATE_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
state = proto.Field(proto.ENUM, number=1, enum=State,)
class ReloadDocumentRequest(proto.Message):
r"""Request message for
[Documents.ReloadDocument][google.cloud.dialogflow.v2beta1.Documents.ReloadDocument].
Attributes:
name (str):
Required. The name of the document to reload. Format:
``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>``
gcs_source (google.cloud.dialogflow_v2beta1.types.GcsSource):
The path for a Cloud Storage source file for
reloading document content. If not provided, the
Document's existing source will be reloaded.
import_gcs_custom_metadata (bool):
Whether to import custom metadata from Google
Cloud Storage. Only valid when the document
source is Google Cloud Storage URI.
"""
name = proto.Field(proto.STRING, number=1,)
gcs_source = proto.Field(
proto.MESSAGE, number=3, oneof="source", message=gcs.GcsSource,
)
import_gcs_custom_metadata = proto.Field(proto.BOOL, number=4,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -2,505,329,625,319,576,600 | 39.197531 | 121 | 0.656388 | false |
emonty/ansible | test/units/playbook/test_playbook.py | 58 | 2089 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.errors import AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars.manager import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
def test_empty_playbook(self):
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
"test_file.yml": """
- hosts: all
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
# represents a playbook which is not a list of plays
"bad_list.yml": """
foo: bar
""",
# represents a playbook where a play entry is mis-formatted
"bad_entry.yml": """
-
- "This should be a mapping..."
""",
})
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
| gpl-3.0 | -3,941,936,610,408,545,000 | 33.245902 | 94 | 0.661082 | false |
home-assistant/home-assistant | homeassistant/components/shelly/logbook.py | 8 | 1069 | """Describe Shelly logbook events."""
from homeassistant.const import ATTR_DEVICE_ID
from homeassistant.core import callback
from .const import (
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
ATTR_DEVICE,
DOMAIN,
EVENT_SHELLY_CLICK,
)
from .utils import get_device_name, get_device_wrapper
@callback
def async_describe_events(hass, async_describe_event):
"""Describe logbook events."""
@callback
def async_describe_shelly_click_event(event):
"""Describe shelly.click logbook event."""
wrapper = get_device_wrapper(hass, event.data[ATTR_DEVICE_ID])
if wrapper:
device_name = get_device_name(wrapper.device)
else:
device_name = event.data[ATTR_DEVICE]
channel = event.data[ATTR_CHANNEL]
click_type = event.data[ATTR_CLICK_TYPE]
return {
"name": "Shelly",
"message": f"'{click_type}' click event for {device_name} channel {channel} was fired.",
}
async_describe_event(DOMAIN, EVENT_SHELLY_CLICK, async_describe_shelly_click_event)
| apache-2.0 | -3,139,449,943,533,126,000 | 27.891892 | 100 | 0.652011 | false |
alexsmx/djangoAppengineSrcTemplate | django/contrib/messages/storage/session.py | 456 | 1213 | from django.contrib.messages.storage.base import BaseStorage
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE_CLASSES list."
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.request.session.get(self.session_key), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = messages
else:
self.request.session.pop(self.session_key, None)
return []
| bsd-3-clause | -2,354,885,119,528,291,300 | 35.757576 | 78 | 0.620775 | false |
onecloud/neutron | neutron/plugins/ml2/db.py | 13 | 5143 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import exc
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import models
LOG = log.getLogger(__name__)
def add_network_segment(session, network_id, segment):
with session.begin(subtransactions=True):
record = models.NetworkSegment(
id=uuidutils.generate_uuid(),
network_id=network_id,
network_type=segment.get(api.NETWORK_TYPE),
physical_network=segment.get(api.PHYSICAL_NETWORK),
segmentation_id=segment.get(api.SEGMENTATION_ID)
)
session.add(record)
LOG.info(_("Added segment %(id)s of type %(network_type)s for network"
" %(network_id)s"),
{'id': record.id,
'network_type': record.network_type,
'network_id': record.network_id})
def get_network_segments(session, network_id):
with session.begin(subtransactions=True):
records = (session.query(models.NetworkSegment).
filter_by(network_id=network_id))
return [{api.ID: record.id,
api.NETWORK_TYPE: record.network_type,
api.PHYSICAL_NETWORK: record.physical_network,
api.SEGMENTATION_ID: record.segmentation_id}
for record in records]
def ensure_port_binding(session, port_id):
with session.begin(subtransactions=True):
try:
record = (session.query(models.PortBinding).
filter_by(port_id=port_id).
one())
except exc.NoResultFound:
record = models.PortBinding(
port_id=port_id,
vif_type=portbindings.VIF_TYPE_UNBOUND)
session.add(record)
return record
def get_port(session, port_id):
"""Get port record for update within transcation."""
with session.begin(subtransactions=True):
try:
record = (session.query(models_v2.Port).
filter(models_v2.Port.id.startswith(port_id)).
one())
return record
except exc.NoResultFound:
return
except exc.MultipleResultsFound:
LOG.error(_("Multiple ports have port_id starting with %s"),
port_id)
return
def get_port_from_device_mac(device_mac):
LOG.debug(_("get_port_from_device_mac() called for mac %s"), device_mac)
session = db_api.get_session()
qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
return qry.first()
def get_port_and_sgs(port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db_api.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
def get_port_binding_host(port_id):
session = db_api.get_session()
with session.begin(subtransactions=True):
try:
query = (session.query(models.PortBinding).
filter(models.PortBinding.port_id.startswith(port_id)).
one())
except exc.NoResultFound:
LOG.debug(_("No binding found for port %(port_id)s"),
{'port_id': port_id})
return
return query.host
| apache-2.0 | -7,846,316,465,486,387,000 | 36.816176 | 79 | 0.616955 | false |
i-DAT-Qualia/Qualia-AC | qualia/tools/api.py | 3 | 1406 | from tastypie.serializers import Serializer
from tastypie.authentication import Authentication
from tastypie.http import HttpUnauthorized
from django.utils.timezone import is_naive
class ISOSerializer(Serializer):
"""
Our own serializer to format datetimes in ISO 8601 but with timezone
offset.
"""
def format_datetime(self, data):
# If naive or rfc-2822, default behavior...
if is_naive(data) or self.datetime_formatting == 'rfc-2822':
return super(ISOSerializer, self).format_datetime(data)
return data.isoformat()
class KeyOnlyAuthentication(Authentication):
'''
    Authorises API calls using just the API key. Likely not perfect,
    but reduces complexity for the end developer.
'''
def _unauthorized(self):
return HttpUnauthorized()
def is_authenticated(self, request, **kwargs):
from tastypie.models import ApiKey
api_key = None
try:
if request.GET:
api_key = request.GET.get('api_key')
elif request.POST:
api_key = request.POST.get('api_key')
if api_key:
key = ApiKey.objects.get(key=api_key)
request.user = key.user
else:
return self._unauthorized()
except ApiKey.DoesNotExist:
return self._unauthorized()
return True | gpl-3.0 | 7,908,463,362,962,133,000 | 29.586957 | 72 | 0.622333 | false |
andreaso/ansible | lib/ansible/utils/module_docs_fragments/aws.py | 135 | 3168 | # (c) 2014, Will Thames <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# AWS only documentation fragment
DOCUMENTATION = """
options:
ec2_url:
description:
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
Ignored for modules where region is required. Must be specified for all other modules if region is not used.
If not set then the value of the EC2_URL environment variable, if any, is used.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
security_token:
description:
- AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
required: false
default: null
aliases: [ 'access_token' ]
version_added: "1.6"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
profile:
description:
- Uses a boto profile. Only works with boto >= 2.24.0.
required: false
default: null
aliases: []
version_added: "1.6"
requirements:
- "python >= 2.6"
- boto
notes:
- If parameters are not set within the module, the following
environment variables can be used in decreasing order of precedence
C(AWS_URL) or C(EC2_URL),
C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
C(AWS_REGION) or C(EC2_REGION)
- Ansible uses the boto configuration file (typically ~/.boto) if no
credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
  - C(AWS_REGION) or C(EC2_REGION) can typically be used to specify the
AWS region, when required, but this can also be configured in the boto config file
"""
| gpl-3.0 | -7,619,411,816,202,523,000 | 38.111111 | 143 | 0.697285 | false |
tenvick/hugula | Client/tools/site-packages/PIL/BufrStubImagePlugin.py | 14 | 1548 | #
# The Python Imaging Library
# $Id: BufrStubImagePlugin.py 2134 2004-10-06 08:55:20Z fredrik $
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
_handler = None
##
# Install application-specific BUFR image handler.
#
# @param handler Handler object.
def register_handler(handler):
global _handler
_handler = handler
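# Illustrative sketch (not part of the original plugin): a handler object only
# needs ``open`` and ``save`` methods, matching the calls made below.  The
# bodies here are hypothetical placeholders.
class _ExampleBufrHandler:
    def open(self, im):
        # A real handler would parse im.fp here and set im.size and im.mode.
        pass
    def save(self, im, fp, filename):
        # A real handler would encode ``im`` as BUFR and write it to ``fp``.
        raise NotImplementedError("BUFR encoding is not implemented in this sketch")
# register_handler(_ExampleBufrHandler()) would install the sketch above.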
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:4] == "BUFR" or prefix[:4] == "ZCZC"
class BufrStubImageFile(ImageFile.StubImageFile):
format = "BUFR"
format_description = "BUFR"
def _open(self):
offset = self.fp.tell()
if not _accept(self.fp.read(8)):
raise SyntaxError("Not a BUFR file")
self.fp.seek(offset)
# make something up
self.mode = "F"
self.size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
raise IOError("BUFR save handler not installed")
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)
Image.register_extension(BufrStubImageFile.format, ".bufr")
| mit | 946,857,316,286,926,600 | 21.764706 | 73 | 0.597545 | false |
ray-zhong/github_trend_spider | ENV/Lib/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 360 | 2852 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = '1.16'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s', __name__)
return handler
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
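# Illustrative sketch (not part of the original module): typical calls into the
# helpers defined above, wrapped in a function so nothing runs at import time.
# The chosen level and warning category are just examples.
def _example_configuration():
    add_stderr_logger(logging.DEBUG)
    disable_warnings(exceptions.InsecureRequestWarning)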
| mit | 6,994,113,337,796,225,000 | 28.708333 | 84 | 0.719144 | false |
mulkieran/justbytes | src/justbytes/_constants.py | 1 | 4224 | # Copyright (C) 2015 - 2019 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; If not, see <http://www.gnu.org/licenses/>.
#
# Red Hat Author(s): Anne Mulhern <[email protected]>
""" Constants used by the justbytes package.
Categories of constants:
* Rounding methods
* Size units, e.g., Ki, Mi
"""
# isort: STDLIB
import abc
from numbers import Rational
# isort: FIRSTPARTY
import justbases
from ._errors import RangeValueError
RoundingMethods = justbases.RoundingMethods
class Unit:
""" Class to encapsulate unit information. """
# pylint: disable=too-few-public-methods
def __init__(self, factor, prefix, abbr):
self._factor = factor
self._prefix = prefix
self._abbr = abbr
def __str__(self):
return self.abbr + "B"
__repr__ = __str__
# pylint: disable=protected-access
factor = property(lambda s: s._factor, doc="numeric multiple of bytes")
abbr = property(lambda s: s._abbr, doc="abbreviation for unit, precedes 'B'")
prefix = property(lambda s: s._prefix, doc="prefix for 'bytes'")
def __int__(self):
return self.factor
B = Unit(1, "", "")
""" The universal unit, bytes. """
class Units(metaclass=abc.ABCMeta):
"""
Generic class for units.
"""
# pylint: disable=too-few-public-methods
FACTOR = abc.abstractproperty(doc="factor for each unit")
_UNITS = abc.abstractproperty(doc="ordered list of units")
_MAX_EXPONENT = None
@classmethod
def UNITS(cls):
"""
Units of this class.
"""
return cls._UNITS[:]
@classmethod
def unit_for_exp(cls, exponent):
"""
Get the unit for the given exponent.
:param int exponent: the exponent, 0 <= exponent < len(UNITS())
"""
if exponent < 0 or exponent > cls.max_exponent():
raise RangeValueError(exponent, "exponent", "no corresponding unit")
if exponent == 0:
return B
return cls._UNITS[exponent - 1]
@classmethod
def max_exponent(cls):
"""
The maximum exponent for which there is a unit.
:returns: the maximum exponent
:rtype: int
"""
if cls._MAX_EXPONENT is None:
cls._MAX_EXPONENT = len(cls._UNITS)
return cls._MAX_EXPONENT
class DecimalUnits(Units):
""" Class to store decimal unit constants. """
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
FACTOR = 10 ** 3
KB = Unit(FACTOR ** 1, "kilo", "k")
MB = Unit(FACTOR ** 2, "mega", "M")
GB = Unit(FACTOR ** 3, "giga", "G")
TB = Unit(FACTOR ** 4, "tera", "T")
PB = Unit(FACTOR ** 5, "peta", "P")
EB = Unit(FACTOR ** 6, "exa", "E")
ZB = Unit(FACTOR ** 7, "zetta", "Z")
YB = Unit(FACTOR ** 8, "yotta", "Y")
_UNITS = [KB, MB, GB, TB, PB, EB, ZB, YB]
class BinaryUnits(Units):
""" Class to store binary unit constants. """
# pylint: disable=too-few-public-methods
FACTOR = 2 ** 10
KiB = Unit(FACTOR ** 1, "kibi", "Ki")
MiB = Unit(FACTOR ** 2, "mebi", "Mi")
GiB = Unit(FACTOR ** 3, "gibi", "Gi")
TiB = Unit(FACTOR ** 4, "tebi", "Ti")
PiB = Unit(FACTOR ** 5, "pebi", "Pi")
EiB = Unit(FACTOR ** 6, "exbi", "Ei")
ZiB = Unit(FACTOR ** 7, "zebi", "Zi")
YiB = Unit(FACTOR ** 8, "yobi", "Yi")
_UNITS = [KiB, MiB, GiB, TiB, PiB, EiB, ZiB, YiB]
def UNITS():
""" All unit constants. """
return [B] + BinaryUnits.UNITS() + DecimalUnits.UNITS()
ROUNDING_METHODS = RoundingMethods.METHODS
PRECISE_NUMERIC_TYPES = (int, Rational)
UNIT_TYPES = tuple(list(PRECISE_NUMERIC_TYPES) + [Unit])
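# Illustrative sketch (not part of the original module): how the unit tables
# above are typically consulted, wrapped in a function so nothing runs at
# import time.
def _example_unit_lookup():
    assert int(DecimalUnits.KB) == 10 ** 3
    assert int(BinaryUnits.KiB) == 2 ** 10
    assert BinaryUnits.unit_for_exp(0) is B
    assert BinaryUnits.unit_for_exp(2) is BinaryUnits.MiB
    return str(BinaryUnits.MiB)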
| gpl-2.0 | 8,119,109,462,130,799,000 | 25.4 | 81 | 0.611742 | false |
akaminsky/ghost_blog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_lassobuiltins.py | 293 | 130675 | # -*- coding: utf-8 -*-
"""
pygments.lexers._lassobuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Built-in Lasso types, traits, methods, and members.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTINS = {
'Types': [
'null',
'void',
'tag',
'trait',
'integer',
'decimal',
'boolean',
'capture',
'string',
'bytes',
'keyword',
'custom',
'staticarray',
'signature',
'memberstream',
'dsinfo',
'sourcefile',
'array',
'pair',
'opaque',
'filedesc',
'dirdesc',
'locale',
'ucal',
'xml_domimplementation',
'xml_node',
'xml_characterdata',
'xml_document',
'xml_element',
'xml_attr',
'xml_text',
'xml_cdatasection',
'xml_entityreference',
'xml_entity',
'xml_processinginstruction',
'xml_comment',
'xml_documenttype',
'xml_documentfragment',
'xml_notation',
'xml_nodelist',
'xml_namednodemap',
'xml_namednodemap_ht',
'xml_namednodemap_attr',
'xmlstream',
'sqlite3',
'sqlite3_stmt',
'mime_reader',
'curltoken',
'regexp',
'zip_impl',
'zip_file_impl',
'library_thread_loader',
'generateforeachunkeyed',
'generateforeachkeyed',
'eacher',
'queriable_where',
'queriable_select',
'queriable_selectmany',
'queriable_groupby',
'queriable_join',
'queriable_groupjoin',
'queriable_orderby',
'queriable_orderbydescending',
'queriable_thenby',
'queriable_thenbydescending',
'queriable_skip',
'queriable_take',
'queriable_grouping',
'generateseries',
'tie',
'pairup',
'delve',
'repeat',
'pair_compare',
'serialization_object_identity_compare',
'serialization_element',
'serialization_writer_standin',
'serialization_writer_ref',
'serialization_writer',
'serialization_reader',
'tree_nullnode',
'tree_node',
'tree_base',
'map_node',
'map',
'file',
'date',
'dir',
'magick_image',
'ldap',
'os_process',
'java_jnienv',
'jobject',
'jmethodid',
'jfieldid',
'database_registry',
'sqlite_db',
'sqlite_results',
'sqlite_currentrow',
'sqlite_table',
'sqlite_column',
'curl',
'debugging_stack',
'dbgp_server',
'dbgp_packet',
'duration',
'inline_type',
'json_literal',
'json_object',
'list_node',
'list',
'jchar',
'jchararray',
'jbyte',
'jbytearray',
'jfloat',
'jint',
'jshort',
'currency',
'scientific',
'percent',
'dateandtime',
'timeonly',
'net_tcp',
'net_tcpssl',
'net_named_pipe',
'net_udppacket',
'net_udp',
'pdf_typebase',
'pdf_doc',
'pdf_color',
'pdf_barcode',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_table',
'pdf_text',
'pdf_hyphenator',
'pdf_chunk',
'pdf_phrase',
'pdf_paragraph',
'queue',
'set',
'sys_process',
'worker_pool',
'zip_file',
'zip',
'cache_server_element',
'cache_server',
'dns_response',
'component_render_state',
'component',
'component_container',
'document_base',
'document_body',
'document_header',
'text_document',
'data_document',
'email_compose',
'email_pop',
'email_parse',
'email_queue_impl_base',
'email_stage_impl_base',
'fcgi_record',
'web_request_impl',
'fcgi_request',
'include_cache',
'atbegin',
'fastcgi_each_fcgi_param',
'fastcgi_server',
'filemaker_datasource',
'http_document',
'http_document_header',
'http_header_field',
'html_document_head',
'html_document_body',
'raw_document_body',
'bytes_document_body',
'html_attr',
'html_atomic_element',
'html_container_element',
'http_error',
'html_script',
'html_text',
'html_raw',
'html_binary',
'html_json',
'html_cdata',
'html_eol',
'html_div',
'html_span',
'html_br',
'html_hr',
'html_h1',
'html_h2',
'html_h3',
'html_h4',
'html_h5',
'html_h6',
'html_meta',
'html_link',
'html_object',
'html_style',
'html_base',
'html_table',
'html_tr',
'html_td',
'html_th',
'html_img',
'html_form',
'html_fieldset',
'html_legend',
'html_input',
'html_label',
'html_option',
'html_select',
'http_server_web_connection',
'http_server',
'http_server_connection_handler',
'image',
'lassoapp_installer',
'lassoapp_content_rep_halt',
'lassoapp_dirsrc_fileresource',
'lassoapp_dirsrc_appsource',
'lassoapp_livesrc_fileresource',
'lassoapp_livesrc_appsource',
'lassoapp_long_expiring_bytes',
'lassoapp_zip_file_server',
'lassoapp_zipsrc_fileresource',
'lassoapp_zipsrc_appsource',
'lassoapp_compiledsrc_fileresource',
'lassoapp_compiledsrc_appsource',
'lassoapp_manualsrc_appsource',
'log_impl_base',
'portal_impl',
'security_registry',
'memory_session_driver_impl_entry',
'memory_session_driver_impl',
'sqlite_session_driver_impl_entry',
'sqlite_session_driver_impl',
'mysql_session_driver_impl',
'odbc_session_driver_impl',
'session_delete_expired_thread',
'email_smtp',
'client_address',
'client_ip',
'web_node_base',
'web_node_root',
'web_node_content_representation_xhr_container',
'web_node_content_representation_html_specialized',
'web_node_content_representation_css_specialized',
'web_node_content_representation_js_specialized',
'web_node_echo',
'web_error_atend',
'web_response_impl',
'web_router'
],
'Traits': [
'trait_asstring',
'any',
'trait_generator',
'trait_decompose_assignment',
'trait_foreach',
'trait_generatorcentric',
'trait_foreachtextelement',
'trait_finite',
'trait_finiteforeach',
'trait_keyed',
'trait_keyedfinite',
'trait_keyedforeach',
'trait_frontended',
'trait_backended',
'trait_doubleended',
'trait_positionallykeyed',
'trait_expandable',
'trait_frontexpandable',
'trait_backexpandable',
'trait_contractible',
'trait_frontcontractible',
'trait_backcontractible',
'trait_fullymutable',
'trait_keyedmutable',
'trait_endedfullymutable',
'trait_setoperations',
'trait_searchable',
'trait_positionallysearchable',
'trait_pathcomponents',
'trait_readbytes',
'trait_writebytes',
'trait_setencoding',
'trait_readstring',
'trait_writestring',
'trait_hashable',
'trait_each_sub',
'trait_stack',
'trait_list',
'trait_array',
'trait_map',
'trait_close',
'trait_file',
'trait_scalar',
'trait_queriablelambda',
'trait_queriable',
'queriable_asstring',
'trait_serializable',
'trait_treenode',
'trait_json_serialize',
'formattingbase',
'trait_net',
'trait_xml_elementcompat',
'trait_xml_nodecompat',
'web_connection',
'html_element_coreattrs',
'html_element_i18nattrs',
'html_element_eventsattrs',
'html_attributed',
'lassoapp_resource',
'lassoapp_source',
'lassoapp_capabilities',
'session_driver',
'web_node_content_json_specialized',
'web_node',
'web_node_container',
'web_node_content_representation',
'web_node_content',
'web_node_content_document',
'web_node_postable',
'web_node_content_html_specialized',
'web_node_content_css_specialized',
'web_node_content_js_specialized'
],
'Unbound Methods': [
'fail_now',
'register',
'register_thread',
'escape_tag',
'handle',
'handle_failure',
'protect_now',
'threadvar_get',
'threadvar_set',
'threadvar_set_asrt',
'threadvar_find',
'abort_now',
'abort_clear',
'failure_clear',
'var_keys',
'var_values',
'staticarray_join',
'suspend',
'main_thread_only',
'split_thread',
'capture_nearestloopcount',
'capture_nearestloopcontinue',
'capture_nearestloopabort',
'io_file_o_rdonly',
'io_file_o_wronly',
'io_file_o_rdwr',
'io_file_o_nonblock',
'io_file_o_sync',
'io_file_o_shlock',
'io_file_o_exlock',
'io_file_o_async',
'io_file_o_fsync',
'io_file_o_nofollow',
'io_file_s_irwxu',
'io_file_s_irusr',
'io_file_s_iwusr',
'io_file_s_ixusr',
'io_file_s_irwxg',
'io_file_s_irgrp',
'io_file_s_iwgrp',
'io_file_s_ixgrp',
'io_file_s_irwxo',
'io_file_s_iroth',
'io_file_s_iwoth',
'io_file_s_ixoth',
'io_file_s_isuid',
'io_file_s_isgid',
'io_file_s_isvtx',
'io_file_s_ifmt',
'io_file_s_ifchr',
'io_file_s_ifdir',
'io_file_s_ifreg',
'io_file_o_append',
'io_file_o_creat',
'io_file_o_trunc',
'io_file_o_excl',
'io_file_seek_set',
'io_file_seek_cur',
'io_file_seek_end',
'io_file_s_ififo',
'io_file_s_ifblk',
'io_file_s_iflnk',
'io_file_s_ifsock',
'io_net_shut_rd',
'io_net_shut_wr',
'io_net_shut_rdwr',
'io_net_sock_stream',
'io_net_sock_dgram',
'io_net_sock_raw',
'io_net_sock_rdm',
'io_net_sock_seqpacket',
'io_net_so_debug',
'io_net_so_acceptconn',
'io_net_so_reuseaddr',
'io_net_so_keepalive',
'io_net_so_dontroute',
'io_net_so_broadcast',
'io_net_so_useloopback',
'io_net_so_linger',
'io_net_so_oobinline',
'io_net_so_timestamp',
'io_net_so_sndbuf',
'io_net_so_rcvbuf',
'io_net_so_sndlowat',
'io_net_so_rcvlowat',
'io_net_so_sndtimeo',
'io_net_so_rcvtimeo',
'io_net_so_error',
'io_net_so_type',
'io_net_sol_socket',
'io_net_af_unix',
'io_net_af_inet',
'io_net_af_inet6',
'io_net_ipproto_ip',
'io_net_ipproto_udp',
'io_net_msg_peek',
'io_net_msg_oob',
'io_net_msg_waitall',
'io_file_fioclex',
'io_file_fionclex',
'io_file_fionread',
'io_file_fionbio',
'io_file_fioasync',
'io_file_fiosetown',
'io_file_fiogetown',
'io_file_fiodtype',
'io_file_f_dupfd',
'io_file_f_getfd',
'io_file_f_setfd',
'io_file_f_getfl',
'io_file_f_setfl',
'io_file_f_getlk',
'io_file_f_setlk',
'io_file_f_setlkw',
'io_file_fd_cloexec',
'io_file_f_rdlck',
'io_file_f_unlck',
'io_file_f_wrlck',
'io_dir_dt_unknown',
'io_dir_dt_fifo',
'io_dir_dt_chr',
'io_dir_dt_blk',
'io_dir_dt_reg',
'io_dir_dt_sock',
'io_dir_dt_wht',
'io_dir_dt_lnk',
'io_dir_dt_dir',
'io_file_access',
'io_file_chdir',
'io_file_getcwd',
'io_file_chown',
'io_file_lchown',
'io_file_truncate',
'io_file_link',
'io_file_pipe',
'io_file_rmdir',
'io_file_symlink',
'io_file_unlink',
'io_file_remove',
'io_file_rename',
'io_file_tempnam',
'io_file_mkstemp',
'io_file_dirname',
'io_file_realpath',
'io_file_chmod',
'io_file_mkdir',
'io_file_mkfifo',
'io_file_umask',
'io_net_socket',
'io_net_bind',
'io_net_connect',
'io_net_listen',
'io_net_recv',
'io_net_recvfrom',
'io_net_accept',
'io_net_send',
'io_net_sendto',
'io_net_shutdown',
'io_net_getpeername',
'io_net_getsockname',
'io_net_ssl_begin',
'io_net_ssl_end',
'io_net_ssl_shutdown',
'io_net_ssl_setverifylocations',
'io_net_ssl_usecertificatechainfile',
'io_net_ssl_useprivatekeyfile',
'io_net_ssl_connect',
'io_net_ssl_accept',
'io_net_ssl_error',
'io_net_ssl_errorstring',
'io_net_ssl_liberrorstring',
'io_net_ssl_funcerrorstring',
'io_net_ssl_reasonerrorstring',
'io_net_ssl_setconnectstate',
'io_net_ssl_setacceptstate',
'io_net_ssl_read',
'io_net_ssl_write',
'io_file_stat_size',
'io_file_stat_mode',
'io_file_stat_mtime',
'io_file_stat_atime',
'io_file_lstat_size',
'io_file_lstat_mode',
'io_file_lstat_mtime',
'io_file_lstat_atime',
'io_file_readlink',
'io_file_lockf',
'io_file_f_ulock',
'io_file_f_tlock',
'io_file_f_test',
'io_file_stdin',
'io_file_stdout',
'io_file_stderr',
'uchar_alphabetic',
'uchar_ascii_hex_digit',
'uchar_bidi_control',
'uchar_bidi_mirrored',
'uchar_dash',
'uchar_default_ignorable_code_point',
'uchar_deprecated',
'uchar_diacritic',
'uchar_extender',
'uchar_full_composition_exclusion',
'uchar_grapheme_base',
'uchar_grapheme_extend',
'uchar_grapheme_link',
'uchar_hex_digit',
'uchar_hyphen',
'uchar_id_continue',
'uchar_ideographic',
'uchar_ids_binary_operator',
'uchar_ids_trinary_operator',
'uchar_join_control',
'uchar_logical_order_exception',
'uchar_lowercase',
'uchar_math',
'uchar_noncharacter_code_point',
'uchar_quotation_mark',
'uchar_radical',
'uchar_soft_dotted',
'uchar_terminal_punctuation',
'uchar_unified_ideograph',
'uchar_uppercase',
'uchar_white_space',
'uchar_xid_continue',
'uchar_case_sensitive',
'uchar_s_term',
'uchar_variation_selector',
'uchar_nfd_inert',
'uchar_nfkd_inert',
'uchar_nfc_inert',
'uchar_nfkc_inert',
'uchar_segment_starter',
'uchar_pattern_syntax',
'uchar_pattern_white_space',
'uchar_posix_alnum',
'uchar_posix_blank',
'uchar_posix_graph',
'uchar_posix_print',
'uchar_posix_xdigit',
'uchar_bidi_class',
'uchar_block',
'uchar_canonical_combining_class',
'uchar_decomposition_type',
'uchar_east_asian_width',
'uchar_general_category',
'uchar_joining_group',
'uchar_joining_type',
'uchar_line_break',
'uchar_numeric_type',
'uchar_script',
'uchar_hangul_syllable_type',
'uchar_nfd_quick_check',
'uchar_nfkd_quick_check',
'uchar_nfc_quick_check',
'uchar_nfkc_quick_check',
'uchar_lead_canonical_combining_class',
'uchar_trail_canonical_combining_class',
'uchar_grapheme_cluster_break',
'uchar_sentence_break',
'uchar_word_break',
'uchar_general_category_mask',
'uchar_numeric_value',
'uchar_age',
'uchar_bidi_mirroring_glyph',
'uchar_case_folding',
'uchar_iso_comment',
'uchar_lowercase_mapping',
'uchar_name',
'uchar_simple_case_folding',
'uchar_simple_lowercase_mapping',
'uchar_simple_titlecase_mapping',
'uchar_simple_uppercase_mapping',
'uchar_titlecase_mapping',
'uchar_unicode_1_name',
'uchar_uppercase_mapping',
'u_wb_other',
'u_wb_aletter',
'u_wb_format',
'u_wb_katakana',
'u_wb_midletter',
'u_wb_midnum',
'u_wb_numeric',
'u_wb_extendnumlet',
'u_sb_other',
'u_sb_aterm',
'u_sb_close',
'u_sb_format',
'u_sb_lower',
'u_sb_numeric',
'u_sb_oletter',
'u_sb_sep',
'u_sb_sp',
'u_sb_sterm',
'u_sb_upper',
'u_lb_unknown',
'u_lb_ambiguous',
'u_lb_alphabetic',
'u_lb_break_both',
'u_lb_break_after',
'u_lb_break_before',
'u_lb_mandatory_break',
'u_lb_contingent_break',
'u_lb_close_punctuation',
'u_lb_combining_mark',
'u_lb_carriage_return',
'u_lb_exclamation',
'u_lb_glue',
'u_lb_hyphen',
'u_lb_ideographic',
'u_lb_inseparable',
'u_lb_infix_numeric',
'u_lb_line_feed',
'u_lb_nonstarter',
'u_lb_numeric',
'u_lb_open_punctuation',
'u_lb_postfix_numeric',
'u_lb_prefix_numeric',
'u_lb_quotation',
'u_lb_complex_context',
'u_lb_surrogate',
'u_lb_space',
'u_lb_break_symbols',
'u_lb_zwspace',
'u_lb_next_line',
'u_lb_word_joiner',
'u_lb_h2',
'u_lb_h3',
'u_lb_jl',
'u_lb_jt',
'u_lb_jv',
'u_nt_none',
'u_nt_decimal',
'u_nt_digit',
'u_nt_numeric',
'locale_english',
'locale_french',
'locale_german',
'locale_italian',
'locale_japanese',
'locale_korean',
'locale_chinese',
'locale_simplifiedchinese',
'locale_traditionalchinese',
'locale_france',
'locale_germany',
'locale_italy',
'locale_japan',
'locale_korea',
'locale_china',
'locale_prc',
'locale_taiwan',
'locale_uk',
'locale_us',
'locale_canada',
'locale_canadafrench',
'locale_default',
'locale_setdefault',
'locale_isocountries',
'locale_isolanguages',
'locale_availablelocales',
'ucal_listtimezones',
'ucal_era',
'ucal_year',
'ucal_month',
'ucal_weekofyear',
'ucal_weekofmonth',
'ucal_dayofmonth',
'ucal_dayofyear',
'ucal_dayofweek',
'ucal_dayofweekinmonth',
'ucal_ampm',
'ucal_hour',
'ucal_hourofday',
'ucal_minute',
'ucal_second',
'ucal_millisecond',
'ucal_zoneoffset',
'ucal_dstoffset',
'ucal_yearwoy',
'ucal_dowlocal',
'ucal_extendedyear',
'ucal_julianday',
'ucal_millisecondsinday',
'ucal_lenient',
'ucal_firstdayofweek',
'ucal_daysinfirstweek',
'sys_sigalrm',
'sys_sighup',
'sys_sigkill',
'sys_sigpipe',
'sys_sigquit',
'sys_sigusr1',
'sys_sigusr2',
'sys_sigchld',
'sys_sigcont',
'sys_sigstop',
'sys_sigtstp',
'sys_sigttin',
'sys_sigttou',
'sys_sigbus',
'sys_sigprof',
'sys_sigsys',
'sys_sigtrap',
'sys_sigurg',
'sys_sigvtalrm',
'sys_sigxcpu',
'sys_sigxfsz',
'sys_wcontinued',
'sys_wnohang',
'sys_wuntraced',
'sys_sigabrt',
'sys_sigfpe',
'sys_sigill',
'sys_sigint',
'sys_sigsegv',
'sys_sigterm',
'sys_exit',
'sys_fork',
'sys_kill',
'sys_waitpid',
'sys_getegid',
'sys_geteuid',
'sys_getgid',
'sys_getlogin',
'sys_getpid',
'sys_getppid',
'sys_getuid',
'sys_setuid',
'sys_setgid',
'sys_setsid',
'sys_errno',
'sys_strerror',
'sys_time',
'sys_difftime',
'sys_getpwuid',
'sys_getpwnam',
'sys_getgrnam',
'sys_drand48',
'sys_erand48',
'sys_jrand48',
'sys_lcong48',
'sys_lrand48',
'sys_mrand48',
'sys_nrand48',
'sys_srand48',
'sys_random',
'sys_srandom',
'sys_seed48',
'sys_rand',
'sys_srand',
'sys_environ',
'sys_getenv',
'sys_setenv',
'sys_unsetenv',
'sys_uname',
'uuid_compare',
'uuid_copy',
'uuid_generate',
'uuid_generate_random',
'uuid_generate_time',
'uuid_is_null',
'uuid_parse',
'uuid_unparse',
'uuid_unparse_lower',
'uuid_unparse_upper',
'sys_credits',
'sleep',
'sys_dll_ext',
'sys_listtypes',
'sys_listtraits',
'sys_listunboundmethods',
'sys_getthreadcount',
'sys_growheapby',
'sys_getheapsize',
'sys_getheapfreebytes',
'sys_getbytessincegc',
'sys_garbagecollect',
'sys_clock',
'sys_getstartclock',
'sys_clockspersec',
'sys_pointersize',
'sys_loadlibrary',
'sys_getchar',
'sys_chroot',
'sys_exec',
'sys_kill_exec',
'sys_wait_exec',
'sys_test_exec',
'sys_detach_exec',
'sys_pid_exec',
'wifexited',
'wexitstatus',
'wifsignaled',
'wtermsig',
'wifstopped',
'wstopsig',
'wifcontinued',
'sys_eol',
'sys_iswindows',
'sys_is_windows',
'sys_isfullpath',
'sys_is_full_path',
'lcapi_loadmodule',
'lcapi_listdatasources',
'encrypt_blowfish',
'decrypt_blowfish',
'cipher_digest',
'cipher_encrypt',
'cipher_decrypt',
'cipher_list',
'cipher_keylength',
'cipher_hmac',
'cipher_seal',
'cipher_open',
'cipher_sign',
'cipher_verify',
'cipher_decrypt_private',
'cipher_decrypt_public',
'cipher_encrypt_private',
'cipher_encrypt_public',
'cipher_generate_key',
'tag_exists',
'curl_easy_init',
'curl_easy_duphandle',
'curl_easy_cleanup',
'curl_easy_getinfo',
'curl_multi_perform',
'curl_multi_result',
'curl_easy_reset',
'curl_easy_setopt',
'curl_easy_strerror',
'curl_getdate',
'curl_version',
'curl_version_info',
'curlinfo_effective_url',
'curlinfo_content_type',
'curlinfo_response_code',
'curlinfo_header_size',
'curlinfo_request_size',
'curlinfo_ssl_verifyresult',
'curlinfo_filetime',
'curlinfo_redirect_count',
'curlinfo_http_connectcode',
'curlinfo_httpauth_avail',
'curlinfo_proxyauth_avail',
'curlinfo_os_errno',
'curlinfo_num_connects',
'curlinfo_total_time',
'curlinfo_namelookup_time',
'curlinfo_connect_time',
'curlinfo_pretransfer_time',
'curlinfo_size_upload',
'curlinfo_size_download',
'curlinfo_speed_download',
'curlinfo_speed_upload',
'curlinfo_content_length_download',
'curlinfo_content_length_upload',
'curlinfo_starttransfer_time',
'curlinfo_redirect_time',
'curlinfo_ssl_engines',
'curlopt_url',
'curlopt_postfields',
'curlopt_cainfo',
'curlopt_capath',
'curlopt_cookie',
'curlopt_cookiefile',
'curlopt_cookiejar',
'curlopt_customrequest',
'curlopt_egdsocket',
'curlopt_encoding',
'curlopt_ftp_account',
'curlopt_ftpport',
'curlopt_interface',
'curlopt_krb4level',
'curlopt_netrc_file',
'curlopt_proxy',
'curlopt_proxyuserpwd',
'curlopt_random_file',
'curlopt_range',
'curlopt_readdata',
'curlopt_referer',
'curlopt_ssl_cipher_list',
'curlopt_sslcert',
'curlopt_sslcerttype',
'curlopt_sslengine',
'curlopt_sslkey',
'curlopt_sslkeypasswd',
'curlopt_sslkeytype',
'curlopt_useragent',
'curlopt_userpwd',
'curlopt_postfieldsize',
'curlopt_autoreferer',
'curlopt_buffersize',
'curlopt_connecttimeout',
'curlopt_cookiesession',
'curlopt_crlf',
'curlopt_dns_use_global_cache',
'curlopt_failonerror',
'curlopt_filetime',
'curlopt_followlocation',
'curlopt_forbid_reuse',
'curlopt_fresh_connect',
'curlopt_ftp_create_missing_dirs',
'curlopt_ftp_response_timeout',
'curlopt_ftp_ssl',
'curlopt_use_ssl',
'curlopt_ftp_use_eprt',
'curlopt_ftp_use_epsv',
'curlopt_ftpappend',
'curlopt_ftplistonly',
'curlopt_ftpsslauth',
'curlopt_header',
'curlopt_http_version',
'curlopt_httpauth',
'curlopt_httpget',
'curlopt_httpproxytunnel',
'curlopt_infilesize',
'curlopt_ipresolve',
'curlopt_low_speed_limit',
'curlopt_low_speed_time',
'curlopt_maxconnects',
'curlopt_maxfilesize',
'curlopt_maxredirs',
'curlopt_netrc',
'curlopt_nobody',
'curlopt_noprogress',
'curlopt_port',
'curlopt_post',
'curlopt_proxyauth',
'curlopt_proxyport',
'curlopt_proxytype',
'curlopt_put',
'curlopt_resume_from',
'curlopt_ssl_verifyhost',
'curlopt_ssl_verifypeer',
'curlopt_sslengine_default',
'curlopt_sslversion',
'curlopt_tcp_nodelay',
'curlopt_timecondition',
'curlopt_timeout',
'curlopt_timevalue',
'curlopt_transfertext',
'curlopt_unrestricted_auth',
'curlopt_upload',
'curlopt_verbose',
'curlopt_infilesize_large',
'curlopt_maxfilesize_large',
'curlopt_postfieldsize_large',
'curlopt_resume_from_large',
'curlopt_http200aliases',
'curlopt_httpheader',
'curlopt_postquote',
'curlopt_prequote',
'curlopt_quote',
'curlopt_httppost',
'curlopt_writedata',
'curl_version_ipv6',
'curl_version_kerberos4',
'curl_version_ssl',
'curl_version_libz',
'curl_version_ntlm',
'curl_version_gssnegotiate',
'curl_version_debug',
'curl_version_asynchdns',
'curl_version_spnego',
'curl_version_largefile',
'curl_version_idn',
'curl_netrc_ignored',
'curl_netrc_optional',
'curl_netrc_required',
'curl_http_version_none',
'curl_http_version_1_0',
'curl_http_version_1_1',
'curl_ipresolve_whatever',
'curl_ipresolve_v4',
'curl_ipresolve_v6',
'curlftpssl_none',
'curlftpssl_try',
'curlftpssl_control',
'curlftpssl_all',
'curlftpssl_last',
'curlftpauth_default',
'curlftpauth_ssl',
'curlftpauth_tls',
'curlauth_none',
'curlauth_basic',
'curlauth_digest',
'curlauth_gssnegotiate',
'curlauth_ntlm',
'curlauth_any',
'curlauth_anysafe',
'curlproxy_http',
'curlproxy_socks4',
'curlproxy_socks5',
'curle_ok',
'curle_unsupported_protocol',
'curle_failed_init',
'curle_url_malformat',
'curle_url_malformat_user',
'curle_couldnt_resolve_proxy',
'curle_couldnt_resolve_host',
'curle_couldnt_connect',
'curle_ftp_weird_server_reply',
'curle_ftp_access_denied',
'curle_ftp_user_password_incorrect',
'curle_ftp_weird_pass_reply',
'curle_ftp_weird_user_reply',
'curle_ftp_weird_pasv_reply',
'curle_ftp_weird_227_format',
'curle_ftp_cant_get_host',
'curle_ftp_cant_reconnect',
'curle_ftp_couldnt_set_binary',
'curle_partial_file',
'curle_ftp_couldnt_retr_file',
'curle_ftp_write_error',
'curle_ftp_quote_error',
'curle_http_returned_error',
'curle_write_error',
'curle_malformat_user',
'curle_read_error',
'curle_out_of_memory',
'curle_operation_timeouted',
'curle_ftp_couldnt_set_ascii',
'curle_ftp_port_failed',
'curle_ftp_couldnt_use_rest',
'curle_ftp_couldnt_get_size',
'curle_http_range_error',
'curle_http_post_error',
'curle_ssl_connect_error',
'curle_bad_download_resume',
'curle_file_couldnt_read_file',
'curle_ldap_cannot_bind',
'curle_ldap_search_failed',
'curle_library_not_found',
'curle_function_not_found',
'curle_aborted_by_callback',
'curle_bad_function_argument',
'curle_bad_calling_order',
'curle_interface_failed',
'curle_bad_password_entered',
'curle_too_many_redirects',
'curle_unknown_telnet_option',
'curle_telnet_option_syntax',
'curle_obsolete',
'curle_ssl_peer_certificate',
'curle_got_nothing',
'curle_ssl_engine_notfound',
'curle_ssl_engine_setfailed',
'curle_send_error',
'curle_recv_error',
'curle_share_in_use',
'curle_ssl_certproblem',
'curle_ssl_cipher',
'curle_ssl_cacert',
'curle_bad_content_encoding',
'curle_ldap_invalid_url',
'curle_filesize_exceeded',
'curle_ftp_ssl_failed',
'curle_send_fail_rewind',
'curle_ssl_engine_initfailed',
'curle_login_denied',
'curlmsg_done',
'zip_open',
'zip_name_locate',
'zip_fopen',
'zip_fopen_index',
'zip_fread',
'zip_fclose',
'zip_close',
'zip_stat',
'zip_stat_index',
'zip_get_archive_comment',
'zip_get_file_comment',
'zip_get_name',
'zip_get_num_files',
'zip_add',
'zip_replace',
'zip_add_dir',
'zip_set_file_comment',
'zip_rename',
'zip_delete',
'zip_unchange',
'zip_unchange_all',
'zip_unchange_archive',
'zip_set_archive_comment',
'zip_error_to_str',
'zip_file_strerror',
'zip_strerror',
'zip_error_get',
'zip_file_error_get',
'zip_error_get_sys_type',
'zlib_version',
'fastcgi_initiate_request',
'debugging_enabled',
'debugging_stop',
'evdns_resolve_ipv4',
'evdns_resolve_ipv6',
'evdns_resolve_reverse',
'evdns_resolve_reverse_ipv6',
'stdout',
'stdoutnl',
'fail',
'fail_if',
'fail_ifnot',
'error_code',
'error_msg',
'error_obj',
'error_stack',
'error_push',
'error_pop',
'error_reset',
'error_msg_invalidparameter',
'error_code_invalidparameter',
'error_msg_networkerror',
'error_code_networkerror',
'error_msg_runtimeassertion',
'error_code_runtimeassertion',
'error_msg_methodnotfound',
'error_code_methodnotfound',
'error_msg_resnotfound',
'error_code_resnotfound',
'error_msg_filenotfound',
'error_code_filenotfound',
'error_msg_aborted',
'error_code_aborted',
'error_msg_dividebyzero',
'error_code_dividebyzero',
'error_msg_noerror',
'error_code_noerror',
'abort',
'protect',
'generateforeach',
'method_name',
'queriable_do',
'queriable_sum',
'queriable_average',
'queriable_min',
'queriable_max',
'queriable_internal_combinebindings',
'queriable_defaultcompare',
'queriable_reversecompare',
'queriable_qsort',
'timer',
'thread_var_push',
'thread_var_pop',
'thread_var_get',
'loop_value',
'loop_value_push',
'loop_value_pop',
'loop_key',
'loop_key_push',
'loop_key_pop',
'loop_push',
'loop_pop',
'loop_count',
'loop_continue',
'loop_abort',
'loop',
'sys_while',
'sys_iterate',
'string_validcharset',
'eol',
'encoding_utf8',
'encoding_iso88591',
'integer_random',
'integer_bitor',
'millis',
'micros',
'max',
'min',
'range',
'median',
'decimal_random',
'pi',
'lcapi_datasourceinit',
'lcapi_datasourceterm',
'lcapi_datasourcenames',
'lcapi_datasourcetablenames',
'lcapi_datasourcesearch',
'lcapi_datasourceadd',
'lcapi_datasourceupdate',
'lcapi_datasourcedelete',
'lcapi_datasourceinfo',
'lcapi_datasourceexecsql',
'lcapi_datasourcerandom',
'lcapi_datasourceschemanames',
'lcapi_datasourcecloseconnection',
'lcapi_datasourcetickle',
'lcapi_datasourceduplicate',
'lcapi_datasourcescripts',
'lcapi_datasourceimage',
'lcapi_datasourcefindall',
'lcapi_datasourcematchesname',
'lcapi_datasourcepreparesql',
'lcapi_datasourceunpreparesql',
'lcapi_datasourcenothing',
'lcapi_fourchartointeger',
'lcapi_datasourcetypestring',
'lcapi_datasourcetypeinteger',
'lcapi_datasourcetypeboolean',
'lcapi_datasourcetypeblob',
'lcapi_datasourcetypedecimal',
'lcapi_datasourcetypedate',
'lcapi_datasourceprotectionnone',
'lcapi_datasourceprotectionreadonly',
'lcapi_datasourceopgt',
'lcapi_datasourceopgteq',
'lcapi_datasourceopeq',
'lcapi_datasourceopneq',
'lcapi_datasourceoplt',
'lcapi_datasourceoplteq',
'lcapi_datasourceopbw',
'lcapi_datasourceopew',
'lcapi_datasourceopct',
'lcapi_datasourceopnct',
'lcapi_datasourceopnbw',
'lcapi_datasourceopnew',
'lcapi_datasourceopand',
'lcapi_datasourceopor',
'lcapi_datasourceopnot',
'lcapi_datasourceopno',
'lcapi_datasourceopany',
'lcapi_datasourceopin',
'lcapi_datasourceopnin',
'lcapi_datasourceopft',
'lcapi_datasourceoprx',
'lcapi_datasourceopnrx',
'lcapi_datasourcesortascending',
'lcapi_datasourcesortdescending',
'lcapi_datasourcesortcustom',
'lcapi_loadmodules',
'lasso_version',
'lasso_uniqueid',
'usage',
'file_defaultencoding',
'file_copybuffersize',
'file_modeline',
'file_modechar',
'file_forceroot',
'file_tempfile',
'file_stdin',
'file_stdout',
'file_stderr',
'lasso_tagexists',
'lasso_methodexists',
'output',
'if_empty',
'if_null',
'if_true',
'if_false',
'process',
'treemap',
'locale_format',
'compress',
'uncompress',
'decompress',
'tag_name',
'series',
'nslookup',
'all',
'bw',
'cn',
'eq',
'ew',
'ft',
'gt',
'gte',
'lt',
'lte',
'neq',
'nrx',
'rx',
'none',
'minimal',
'full',
'output_none',
'lasso_executiontimelimit',
'namespace_global',
'namespace_using',
'namespace_import',
'site_id',
'site_name',
'sys_homepath',
'sys_masterhomepath',
'sys_supportpath',
'sys_librariespath',
'sys_databasespath',
'sys_usercapimodulepath',
'sys_appspath',
'sys_userstartuppath',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'mysqlds',
'odbc',
'sqliteconnector',
'sqlite_createdb',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'java_jvm_getenv',
'java_jvm_create',
'java_jdbc_load',
'database_database',
'database_table_datasources',
'database_table_datasource_hosts',
'database_table_datasource_databases',
'database_table_database_tables',
'database_table_table_fields',
'database_qs',
'database_initialize',
'database_util_cleanpath',
'database_adddefaultsqlitehost',
'sqlite_ok',
'sqlite_error',
'sqlite_internal',
'sqlite_perm',
'sqlite_abort',
'sqlite_busy',
'sqlite_locked',
'sqlite_nomem',
'sqlite_readonly',
'sqlite_interrupt',
'sqlite_ioerr',
'sqlite_corrupt',
'sqlite_notfound',
'sqlite_full',
'sqlite_cantopen',
'sqlite_protocol',
'sqlite_empty',
'sqlite_schema',
'sqlite_toobig',
'sqlite_constraint',
'sqlite_mismatch',
'sqlite_misuse',
'sqlite_nolfs',
'sqlite_auth',
'sqlite_format',
'sqlite_range',
'sqlite_notadb',
'sqlite_row',
'sqlite_done',
'sqlite_integer',
'sqlite_float',
'sqlite_blob',
'sqlite_null',
'sqlite_text',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'include_url',
'ftp_getdata',
'ftp_getfile',
'ftp_getlisting',
'ftp_putdata',
'ftp_putfile',
'ftp_deletefile',
'debugging_step_in',
'debugging_get_stack',
'debugging_get_context',
'debugging_detach',
'debugging_step_over',
'debugging_step_out',
'debugging_run',
'debugging_break',
'debugging_breakpoint_set',
'debugging_breakpoint_get',
'debugging_breakpoint_remove',
'debugging_breakpoint_list',
'debugging_breakpoint_update',
'debugging_terminate',
'debugging_context_locals',
'debugging_context_vars',
'debugging_context_self',
'dbgp_stop_stack_name',
'encrypt_md5',
'inline_columninfo_pos',
'inline_resultrows_pos',
'inline_foundcount_pos',
'inline_colinfo_name_pos',
'inline_colinfo_valuelist_pos',
'inline_scopeget',
'inline_scopepush',
'inline_scopepop',
'inline_namedget',
'inline_namedput',
'inline',
'resultset_count',
'resultset',
'resultsets',
'rows',
'rows_impl',
'records',
'column',
'field',
'column_names',
'field_names',
'column_name',
'field_name',
'found_count',
'shown_count',
'shown_first',
'shown_last',
'action_statement',
'lasso_currentaction',
'maxrecords_value',
'skiprecords_value',
'action_param',
'action_params',
'admin_authorization',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'database_name',
'table_name',
'layout_name',
'schema_name',
'keycolumn_name',
'keyfield_name',
'keycolumn_value',
'keyfield_value',
'inline_colinfo_type_pos',
'column_type',
'rows_array',
'records_array',
'records_map',
'json_serialize',
'json_consume_string',
'json_consume_token',
'json_consume_array',
'json_consume_object',
'json_deserialize',
'json_rpccall',
'ljapi_initialize',
'locale_format_style_full',
'locale_format_style_long',
'locale_format_style_medium',
'locale_format_style_short',
'locale_format_style_default',
'locale_format_style_none',
'locale_format_style_date_time',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'admin_initialize',
'admin_getpref',
'admin_setpref',
'admin_removepref',
'admin_userexists',
'admin_lassoservicepath',
'pdf_package',
'pdf_rectangle',
'pdf_serve',
'random_seed',
'xml',
'xml_transform',
'zip_create',
'zip_excl',
'zip_checkcons',
'zip_fl_nocase',
'zip_fl_nodir',
'zip_fl_compressed',
'zip_fl_unchanged',
'zip_er_ok',
'zip_er_multidisk',
'zip_er_rename',
'zip_er_close',
'zip_er_seek',
'zip_er_read',
'zip_er_write',
'zip_er_crc',
'zip_er_zipclosed',
'zip_er_noent',
'zip_er_exists',
'zip_er_open',
'zip_er_tmpopen',
'zip_er_zlib',
'zip_er_memory',
'zip_er_changed',
'zip_er_compnotsupp',
'zip_er_eof',
'zip_er_inval',
'zip_er_nozip',
'zip_er_internal',
'zip_er_incons',
'zip_er_remove',
'zip_er_deleted',
'zip_et_none',
'zip_et_sys',
'zip_et_zlib',
'zip_cm_default',
'zip_cm_store',
'zip_cm_shrink',
'zip_cm_reduce_1',
'zip_cm_reduce_2',
'zip_cm_reduce_3',
'zip_cm_reduce_4',
'zip_cm_implode',
'zip_cm_deflate',
'zip_cm_deflate64',
'zip_cm_pkware_implode',
'zip_cm_bzip2',
'zip_em_none',
'zip_em_trad_pkware',
'zip_em_des',
'zip_em_rc2_old',
'zip_em_3des_168',
'zip_em_3des_112',
'zip_em_aes_128',
'zip_em_aes_192',
'zip_em_aes_256',
'zip_em_rc2',
'zip_em_rc4',
'zip_em_unknown',
'dns_lookup',
'dns_default',
'string_charfromname',
'string_concatenate',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_lowercase',
'document',
'email_attachment_mime_type',
'email_translatebreakstocrlf',
'email_findemails',
'email_fix_address',
'email_fix_address_list',
'encode_qheader',
'email_send',
'email_queue',
'email_immediate',
'email_result',
'email_status',
'email_token',
'email_merge',
'email_batch',
'email_safeemail',
'email_extract',
'email_pop_priv_substring',
'email_pop_priv_extract',
'email_digestchallenge',
'email_pop_priv_quote',
'email_digestresponse',
'encrypt_hmac',
'encrypt_crammd5',
'email_fs_error_clean',
'email_initialize',
'email_mxlookup',
'lasso_errorreporting',
'fcgi_version_1',
'fcgi_null_request_id',
'fcgi_begin_request',
'fcgi_abort_request',
'fcgi_end_request',
'fcgi_params',
'fcgi_stdin',
'fcgi_stdout',
'fcgi_stderr',
'fcgi_data',
'fcgi_get_values',
'fcgi_get_values_result',
'fcgi_unknown_type',
'fcgi_keep_conn',
'fcgi_responder',
'fcgi_authorize',
'fcgi_filter',
'fcgi_request_complete',
'fcgi_cant_mpx_conn',
'fcgi_overloaded',
'fcgi_unknown_role',
'fcgi_max_conns',
'fcgi_max_reqs',
'fcgi_mpxs_conns',
'fcgi_read_timeout_seconds',
'fcgi_makeendrequestbody',
'fcgi_bodychunksize',
'fcgi_makestdoutbody',
'fcgi_readparam',
'web_request',
'include_cache_compare',
'fastcgi_initialize',
'fastcgi_handlecon',
'fastcgi_handlereq',
'fastcgi_createfcgirequest',
'web_handlefcgirequest',
'filemakerds_initialize',
'filemakerds',
'value_listitem',
'valuelistitem',
'selected',
'checked',
'value_list',
'http_char_space',
'http_char_htab',
'http_char_cr',
'http_char_lf',
'http_char_question',
'http_char_colon',
'http_read_timeout_secs',
'http_default_files',
'http_server_apps_path',
'jdbc_initialize',
'lassoapp_settingsdb',
'lassoapp_format_mod_date',
'lassoapp_include_current',
'lassoapp_include',
'lassoapp_find_missing_file',
'lassoapp_get_capabilities_name',
'lassoapp_exists',
'lassoapp_path_to_method_name',
'lassoapp_invoke_resource',
'lassoapp_initialize_db',
'lassoapp_initialize',
'lassoapp_issourcefileextension',
'lassoapp_current_include',
'lassoapp_current_app',
'lassoapp_do_with_include',
'lassoapp_link',
'lassoapp_load_module',
'lassoapp_mime_type_html',
'lassoapp_mime_type_lasso',
'lassoapp_mime_type_xml',
'lassoapp_mime_type_ppt',
'lassoapp_mime_type_js',
'lassoapp_mime_type_txt',
'lassoapp_mime_type_jpg',
'lassoapp_mime_type_png',
'lassoapp_mime_type_gif',
'lassoapp_mime_type_css',
'lassoapp_mime_type_csv',
'lassoapp_mime_type_tif',
'lassoapp_mime_type_ico',
'lassoapp_mime_type_rss',
'lassoapp_mime_type_xhr',
'lassoapp_mime_type_pdf',
'lassoapp_mime_type_docx',
'lassoapp_mime_type_doc',
'lassoapp_mime_type_zip',
'lassoapp_mime_type_svg',
'lassoapp_mime_type_ttf',
'lassoapp_mime_type_woff',
'lassoapp_mime_type_swf',
'lassoapp_mime_get',
'log_level_critical',
'log_level_warning',
'log_level_detail',
'log_level_sql',
'log_level_deprecated',
'log_destination_console',
'log_destination_file',
'log_destination_database',
'log',
'log_setdestination',
'log_always',
'log_critical',
'log_warning',
'log_detail',
'log_sql',
'log_deprecated',
'log_max_file_size',
'log_trim_file_size',
'log_initialize',
'portal',
'security_database',
'security_table_groups',
'security_table_users',
'security_table_ug_map',
'security_default_realm',
'security_initialize',
'session_initialize',
'session_getdefaultdriver',
'session_setdefaultdriver',
'session_start',
'session_addvar',
'session_removevar',
'session_end',
'session_id',
'session_abort',
'session_result',
'session_deleteexpired',
'odbc_session_driver_mssql',
'session_decorate',
'auth_admin',
'auth_check',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'client_addr',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_getparam',
'client_headers',
'client_integertoip',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_postparam',
'client_type',
'client_username',
'client_url',
'referer_url',
'referrer_url',
'content_type',
'content_encoding',
'cookie',
'cookie_set',
'include',
'include_currentpath',
'include_filepath',
'include_localpath',
'include_once',
'include_path',
'include_raw',
'includes',
'library',
'library_once',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'response_root',
'redirect_url',
'server_admin',
'server_name',
'server_ip',
'server_port',
'server_protocol',
'server_signature',
'server_software',
'server_push',
'token_value',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxhorzpixels',
'wap_maxvertpixels',
'wap_maxcolumns',
'wap_maxrows',
'define_atbegin',
'define_atend',
'content_header',
'content_addheader',
'content_replaceheader',
'content_body',
'html_comment',
'web_node_forpath',
'web_nodes_requesthandler',
'web_nodes_normalizeextension',
'web_nodes_processcontentnode',
'web_nodes_initialize',
'web_node_content_representation_xhr',
'web_node_content_representation_html',
'web_node_content_representation_css',
'web_node_content_representation_js',
'web_response_nodesentry',
'web_response',
'web_router_database',
'web_router_initialize'
],
'Lasso 8 Tags': [
'__char',
'__sync_timestamp__',
'_admin_addgroup',
'_admin_adduser',
'_admin_defaultconnector',
'_admin_defaultconnectornames',
'_admin_defaultdatabase',
'_admin_defaultfield',
'_admin_defaultgroup',
'_admin_defaulthost',
'_admin_defaulttable',
'_admin_defaultuser',
'_admin_deleteconnector',
'_admin_deletedatabase',
'_admin_deletefield',
'_admin_deletegroup',
'_admin_deletehost',
'_admin_deletetable',
'_admin_deleteuser',
'_admin_duplicategroup',
'_admin_internaldatabase',
'_admin_listconnectors',
'_admin_listdatabases',
'_admin_listfields',
'_admin_listgroups',
'_admin_listhosts',
'_admin_listtables',
'_admin_listusers',
'_admin_refreshconnector',
'_admin_refreshsecurity',
'_admin_servicepath',
'_admin_updateconnector',
'_admin_updatedatabase',
'_admin_updatefield',
'_admin_updategroup',
'_admin_updatehost',
'_admin_updatetable',
'_admin_updateuser',
'_chartfx_activation_string',
'_chartfx_getchallengestring',
'_chop_args',
'_chop_mimes',
'_client_addr_old',
'_client_address_old',
'_client_ip_old',
'_database_names',
'_datasource_reload',
'_date_current',
'_date_format',
'_date_msec',
'_date_parse',
'_execution_timelimit',
'_file_chmod',
'_initialize',
'_jdbc_acceptsurl',
'_jdbc_debug',
'_jdbc_deletehost',
'_jdbc_driverclasses',
'_jdbc_driverinfo',
'_jdbc_metainfo',
'_jdbc_propertyinfo',
'_jdbc_setdriver',
'_lasso_param',
'_log_helper',
'_proc_noparam',
'_proc_withparam',
'_recursion_limit',
'_request_param',
'_security_binaryexpiration',
'_security_flushcaches',
'_security_isserialized',
'_security_serialexpiration',
'_srand',
'_strict_literals',
'_substring',
'_xmlrpc_exconverter',
'_xmlrpc_inconverter',
'_xmlrpc_xmlinconverter',
'abort',
'action_addinfo',
'action_addrecord',
'action_param',
'action_params',
'action_setfoundcount',
'action_setrecordid',
'action_settotalcount',
'action_statement',
'admin_allowedfileroots',
'admin_changeuser',
'admin_createuser',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_groupassignuser',
'admin_grouplistusers',
'admin_groupremoveuser',
'admin_lassoservicepath',
'admin_listgroups',
'admin_refreshlicensing',
'admin_refreshsecurity',
'admin_reloaddatasource',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'admin_userlistgroups',
'all',
'and',
'array',
'array_iterator',
'auth',
'auth_admin',
'auth_auth',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'base64',
'bean',
'bigint',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'boolean',
'bw',
'bytes',
'cache',
'cache_delete',
'cache_empty',
'cache_exists',
'cache_fetch',
'cache_internal',
'cache_maintenance',
'cache_object',
'cache_preferences',
'cache_store',
'case',
'chartfx',
'chartfx_records',
'chartfx_serve',
'checked',
'choice_list',
'choice_listitem',
'choicelistitem',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'click_text',
'client_addr',
'client_address',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_headers',
'client_ip',
'client_ipfrominteger',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_type',
'client_url',
'client_username',
'cn',
'column',
'column_name',
'column_names',
'compare_beginswith',
'compare_contains',
'compare_endswith',
'compare_equalto',
'compare_greaterthan',
'compare_greaterthanorequals',
'compare_greaterthanorequls',
'compare_lessthan',
'compare_lessthanorequals',
'compare_notbeginswith',
'compare_notcontains',
'compare_notendswith',
'compare_notequalto',
'compare_notregexp',
'compare_regexp',
'compare_strictequalto',
'compare_strictnotequalto',
'compiler_removecacheddoc',
'compiler_setdefaultparserflags',
'compress',
'content_body',
'content_encoding',
'content_header',
'content_type',
'cookie',
'cookie_set',
'curl_ftp_getfile',
'curl_ftp_getlisting',
'curl_ftp_putfile',
'curl_include_url',
'currency',
'database_changecolumn',
'database_changefield',
'database_createcolumn',
'database_createfield',
'database_createtable',
'database_fmcontainer',
'database_hostinfo',
'database_inline',
'database_name',
'database_nameitem',
'database_names',
'database_realname',
'database_removecolumn',
'database_removefield',
'database_removetable',
'database_repeating',
'database_repeating_valueitem',
'database_repeatingvalueitem',
'database_schemanameitem',
'database_schemanames',
'database_tablecolumn',
'database_tablenameitem',
'database_tablenames',
'datasource_name',
'datasource_register',
'date',
'date__date_current',
'date__date_format',
'date__date_msec',
'date__date_parse',
'date_add',
'date_date',
'date_difference',
'date_duration',
'date_format',
'date_getcurrentdate',
'date_getday',
'date_getdayofweek',
'date_gethour',
'date_getlocaltimezone',
'date_getminute',
'date_getmonth',
'date_getsecond',
'date_gettime',
'date_getyear',
'date_gmttolocal',
'date_localtogmt',
'date_maximum',
'date_minimum',
'date_msec',
'date_setformat',
'date_subtract',
'db_layoutnameitem',
'db_layoutnames',
'db_nameitem',
'db_names',
'db_tablenameitem',
'db_tablenames',
'dbi_column_names',
'dbi_field_names',
'decimal',
'decimal_setglobaldefaultprecision',
'decode_base64',
'decode_bheader',
'decode_hex',
'decode_html',
'decode_json',
'decode_qheader',
'decode_quotedprintable',
'decode_quotedprintablebytes',
'decode_url',
'decode_xml',
'decompress',
'decrypt_blowfish',
'decrypt_blowfish2',
'default',
'define_atbegin',
'define_atend',
'define_constant',
'define_prototype',
'define_tag',
'define_tagp',
'define_type',
'define_typep',
'deserialize',
'directory_directorynameitem',
'directory_lister',
'directory_nameitem',
'directorynameitem',
'dns_default',
'dns_lookup',
'dns_response',
'duration',
'else',
'email_batch',
'email_compose',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_immediate',
'email_merge',
'email_mxerror',
'email_mxlookup',
'email_parse',
'email_pop',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_smtp',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_base64',
'encode_bheader',
'encode_break',
'encode_breaks',
'encode_crc32',
'encode_hex',
'encode_html',
'encode_htmltoxml',
'encode_json',
'encode_qheader',
'encode_quotedprintable',
'encode_quotedprintablebytes',
'encode_set',
'encode_smart',
'encode_sql',
'encode_sql92',
'encode_stricturl',
'encode_url',
'encode_xml',
'encrypt_blowfish',
'encrypt_blowfish2',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'eq',
'error_adderror',
'error_code',
'error_code_aborted',
'error_code_assert',
'error_code_bof',
'error_code_connectioninvalid',
'error_code_couldnotclosefile',
'error_code_couldnotcreateoropenfile',
'error_code_couldnotdeletefile',
'error_code_couldnotdisposememory',
'error_code_couldnotlockmemory',
'error_code_couldnotreadfromfile',
'error_code_couldnotunlockmemory',
'error_code_couldnotwritetofile',
'error_code_criterianotmet',
'error_code_datasourceerror',
'error_code_directoryfull',
'error_code_diskfull',
'error_code_dividebyzero',
'error_code_eof',
'error_code_failure',
'error_code_fieldrestriction',
'error_code_file',
'error_code_filealreadyexists',
'error_code_filecorrupt',
'error_code_fileinvalid',
'error_code_fileinvalidaccessmode',
'error_code_fileisclosed',
'error_code_fileisopen',
'error_code_filelocked',
'error_code_filenotfound',
'error_code_fileunlocked',
'error_code_httpfilenotfound',
'error_code_illegalinstruction',
'error_code_illegaluseoffrozeninstance',
'error_code_invaliddatabase',
'error_code_invalidfilename',
'error_code_invalidmemoryobject',
'error_code_invalidparameter',
'error_code_invalidpassword',
'error_code_invalidpathname',
'error_code_invalidusername',
'error_code_ioerror',
'error_code_loopaborted',
'error_code_memory',
'error_code_network',
'error_code_nilpointer',
'error_code_noerr',
'error_code_nopermission',
'error_code_outofmemory',
'error_code_outofstackspace',
'error_code_overflow',
'error_code_postconditionfailed',
'error_code_preconditionfailed',
'error_code_resnotfound',
'error_code_resource',
'error_code_streamreaderror',
'error_code_streamwriteerror',
'error_code_syntaxerror',
'error_code_tagnotfound',
'error_code_unknownerror',
'error_code_varnotfound',
'error_code_volumedoesnotexist',
'error_code_webactionnotsupported',
'error_code_webadderror',
'error_code_webdeleteerror',
'error_code_webmodulenotfound',
'error_code_webnosuchobject',
'error_code_webrepeatingrelatedfield',
'error_code_webrequiredfieldmissing',
'error_code_webtimeout',
'error_code_webupdateerror',
'error_columnrestriction',
'error_currenterror',
'error_databaseconnectionunavailable',
'error_databasetimeout',
'error_deleteerror',
'error_fieldrestriction',
'error_filenotfound',
'error_invaliddatabase',
'error_invalidpassword',
'error_invalidusername',
'error_modulenotfound',
'error_msg',
'error_msg_aborted',
'error_msg_assert',
'error_msg_bof',
'error_msg_connectioninvalid',
'error_msg_couldnotclosefile',
'error_msg_couldnotcreateoropenfile',
'error_msg_couldnotdeletefile',
'error_msg_couldnotdisposememory',
'error_msg_couldnotlockmemory',
'error_msg_couldnotreadfromfile',
'error_msg_couldnotunlockmemory',
'error_msg_couldnotwritetofile',
'error_msg_criterianotmet',
'error_msg_datasourceerror',
'error_msg_directoryfull',
'error_msg_diskfull',
'error_msg_dividebyzero',
'error_msg_eof',
'error_msg_failure',
'error_msg_fieldrestriction',
'error_msg_file',
'error_msg_filealreadyexists',
'error_msg_filecorrupt',
'error_msg_fileinvalid',
'error_msg_fileinvalidaccessmode',
'error_msg_fileisclosed',
'error_msg_fileisopen',
'error_msg_filelocked',
'error_msg_filenotfound',
'error_msg_fileunlocked',
'error_msg_httpfilenotfound',
'error_msg_illegalinstruction',
'error_msg_illegaluseoffrozeninstance',
'error_msg_invaliddatabase',
'error_msg_invalidfilename',
'error_msg_invalidmemoryobject',
'error_msg_invalidparameter',
'error_msg_invalidpassword',
'error_msg_invalidpathname',
'error_msg_invalidusername',
'error_msg_ioerror',
'error_msg_loopaborted',
'error_msg_memory',
'error_msg_network',
'error_msg_nilpointer',
'error_msg_noerr',
'error_msg_nopermission',
'error_msg_outofmemory',
'error_msg_outofstackspace',
'error_msg_overflow',
'error_msg_postconditionfailed',
'error_msg_preconditionfailed',
'error_msg_resnotfound',
'error_msg_resource',
'error_msg_streamreaderror',
'error_msg_streamwriteerror',
'error_msg_syntaxerror',
'error_msg_tagnotfound',
'error_msg_unknownerror',
'error_msg_varnotfound',
'error_msg_volumedoesnotexist',
'error_msg_webactionnotsupported',
'error_msg_webadderror',
'error_msg_webdeleteerror',
'error_msg_webmodulenotfound',
'error_msg_webnosuchobject',
'error_msg_webrepeatingrelatedfield',
'error_msg_webrequiredfieldmissing',
'error_msg_webtimeout',
'error_msg_webupdateerror',
'error_noerror',
'error_nopermission',
'error_norecordsfound',
'error_outofmemory',
'error_pop',
'error_push',
'error_reqcolumnmissing',
'error_reqfieldmissing',
'error_requiredcolumnmissing',
'error_requiredfieldmissing',
'error_reset',
'error_seterrorcode',
'error_seterrormessage',
'error_updateerror',
'euro',
'event_schedule',
'ew',
'fail',
'fail_if',
'false',
'field',
'field_name',
'field_names',
'file',
'file_autoresolvefullpaths',
'file_chmod',
'file_control',
'file_copy',
'file_create',
'file_creationdate',
'file_currenterror',
'file_delete',
'file_exists',
'file_getlinecount',
'file_getsize',
'file_isdirectory',
'file_listdirectory',
'file_moddate',
'file_modechar',
'file_modeline',
'file_move',
'file_openread',
'file_openreadwrite',
'file_openwrite',
'file_openwriteappend',
'file_openwritetruncate',
'file_probeeol',
'file_processuploads',
'file_read',
'file_readline',
'file_rename',
'file_serve',
'file_setsize',
'file_stream',
'file_streamcopy',
'file_uploads',
'file_waitread',
'file_waittimeout',
'file_waitwrite',
'file_write',
'find_soap_ops',
'form_param',
'found_count',
'ft',
'ftp_getfile',
'ftp_getlisting',
'ftp_putfile',
'full',
'global',
'global_defined',
'global_remove',
'global_reset',
'globals',
'gt',
'gte',
'handle',
'handle_error',
'header',
'html_comment',
'http_getfile',
'ical_alarm',
'ical_attribute',
'ical_calendar',
'ical_daylight',
'ical_event',
'ical_freebusy',
'ical_item',
'ical_journal',
'ical_parse',
'ical_standard',
'ical_timezone',
'ical_todo',
'if',
'if_empty',
'if_false',
'if_null',
'if_true',
'image',
'image_url',
'img',
'include',
'include_cgi',
'include_currentpath',
'include_once',
'include_raw',
'include_url',
'inline',
'integer',
'iterate',
'iterator',
'java',
'java_bean',
'json_records',
'json_rpccall',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'lasso_comment',
'lasso_currentaction',
'lasso_datasourceis',
'lasso_datasourceis4d',
'lasso_datasourceisfilemaker',
'lasso_datasourceisfilemaker7',
'lasso_datasourceisfilemaker9',
'lasso_datasourceisfilemakersa',
'lasso_datasourceisjdbc',
'lasso_datasourceislassomysql',
'lasso_datasourceismysql',
'lasso_datasourceisodbc',
'lasso_datasourceisopenbase',
'lasso_datasourceisoracle',
'lasso_datasourceispostgresql',
'lasso_datasourceisspotlight',
'lasso_datasourceissqlite',
'lasso_datasourceissqlserver',
'lasso_datasourcemodulename',
'lasso_datatype',
'lasso_disableondemand',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_parser',
'lasso_process',
'lasso_sessionid',
'lasso_siteid',
'lasso_siteisrunning',
'lasso_sitename',
'lasso_siterestart',
'lasso_sitestart',
'lasso_sitestop',
'lasso_tagexists',
'lasso_tagmodulename',
'lasso_uniqueid',
'lasso_updatecheck',
'lasso_uptime',
'lasso_version',
'lassoapp_create',
'lassoapp_dump',
'lassoapp_flattendir',
'lassoapp_getappdata',
'lassoapp_link',
'lassoapp_list',
'lassoapp_process',
'lassoapp_unitize',
'layout_name',
'ldap',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'ldml',
'ldml_ldml',
'library',
'library_once',
'link',
'link_currentaction',
'link_currentactionparams',
'link_currentactionurl',
'link_currentgroup',
'link_currentgroupparams',
'link_currentgroupurl',
'link_currentrecord',
'link_currentrecordparams',
'link_currentrecordurl',
'link_currentsearch',
'link_currentsearchparams',
'link_currentsearchurl',
'link_detail',
'link_detailparams',
'link_detailurl',
'link_firstgroup',
'link_firstgroupparams',
'link_firstgroupurl',
'link_firstrecord',
'link_firstrecordparams',
'link_firstrecordurl',
'link_lastgroup',
'link_lastgroupparams',
'link_lastgroupurl',
'link_lastrecord',
'link_lastrecordparams',
'link_lastrecordurl',
'link_nextgroup',
'link_nextgroupparams',
'link_nextgroupurl',
'link_nextrecord',
'link_nextrecordparams',
'link_nextrecordurl',
'link_params',
'link_prevgroup',
'link_prevgroupparams',
'link_prevgroupurl',
'link_prevrecord',
'link_prevrecordparams',
'link_prevrecordurl',
'link_setformat',
'link_url',
'list',
'list_additem',
'list_fromlist',
'list_fromstring',
'list_getitem',
'list_itemcount',
'list_iterator',
'list_removeitem',
'list_replaceitem',
'list_reverseiterator',
'list_tostring',
'literal',
'ljax_end',
'ljax_hastarget',
'ljax_include',
'ljax_start',
'ljax_target',
'local',
'local_defined',
'local_remove',
'local_reset',
'locale_format',
'locals',
'log',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_setdestination',
'log_sql',
'log_warning',
'logicalop_value',
'logicaloperator_value',
'loop',
'loop_abort',
'loop_continue',
'loop_count',
'lt',
'lte',
'magick_image',
'map',
'map_iterator',
'match_comparator',
'match_notrange',
'match_notregexp',
'match_range',
'match_regexp',
'math_abs',
'math_acos',
'math_add',
'math_asin',
'math_atan',
'math_atan2',
'math_ceil',
'math_converteuro',
'math_cos',
'math_div',
'math_exp',
'math_floor',
'math_internal_rand',
'math_internal_randmax',
'math_internal_srand',
'math_ln',
'math_log',
'math_log10',
'math_max',
'math_min',
'math_mod',
'math_mult',
'math_pow',
'math_random',
'math_range',
'math_rint',
'math_roman',
'math_round',
'math_sin',
'math_sqrt',
'math_sub',
'math_tan',
'maxrecords_value',
'memory_session_driver',
'mime_type',
'minimal',
'misc__srand',
'misc_randomnumber',
'misc_roman',
'misc_valid_creditcard',
'mysql_session_driver',
'named_param',
'namespace_current',
'namespace_delimiter',
'namespace_exists',
'namespace_file_fullpathexists',
'namespace_global',
'namespace_import',
'namespace_load',
'namespace_page',
'namespace_unload',
'namespace_using',
'neq',
'net',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'no_default_output',
'none',
'noprocess',
'not',
'nrx',
'nslookup',
'null',
'object',
'once',
'oneoff',
'op_logicalvalue',
'operator_logicalvalue',
'option',
'or',
'os_process',
'output',
'output_none',
'pair',
'params_up',
'pdf_barcode',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_serve',
'pdf_table',
'pdf_text',
'percent',
'portal',
'postcondition',
'precondition',
'prettyprintingnsmap',
'prettyprintingtypemap',
'priorityqueue',
'private',
'proc_convert',
'proc_convertbody',
'proc_convertone',
'proc_extract',
'proc_extractone',
'proc_find',
'proc_first',
'proc_foreach',
'proc_get',
'proc_join',
'proc_lasso',
'proc_last',
'proc_map_entry',
'proc_null',
'proc_regexp',
'proc_xml',
'proc_xslt',
'process',
'protect',
'queue',
'rand',
'randomnumber',
'raw',
'recid_value',
'record_count',
'recordcount',
'recordid_value',
'records',
'records_array',
'records_map',
'redirect_url',
'reference',
'referer',
'referer_url',
'referrer',
'referrer_url',
'regexp',
'repeating',
'repeating_valueitem',
'repeatingvalueitem',
'repetition',
'req_column',
'req_field',
'required_column',
'required_field',
'response_fileexists',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'resultset',
'resultset_count',
'return',
'return_value',
'reverseiterator',
'roman',
'row_count',
'rows',
'rows_array',
'run_children',
'rx',
'schema_name',
'scientific',
'search_args',
'search_arguments',
'search_columnitem',
'search_fielditem',
'search_operatoritem',
'search_opitem',
'search_valueitem',
'searchfielditem',
'searchoperatoritem',
'searchopitem',
'searchvalueitem',
'select',
'selected',
'self',
'serialize',
'series',
'server_date',
'server_day',
'server_ip',
'server_name',
'server_port',
'server_push',
'server_siteisrunning',
'server_sitestart',
'server_sitestop',
'server_time',
'session_abort',
'session_addoutputfilter',
'session_addvar',
'session_addvariable',
'session_deleteexpired',
'session_driver',
'session_end',
'session_id',
'session_removevar',
'session_removevariable',
'session_result',
'session_setdriver',
'session_start',
'set',
'set_iterator',
'set_reverseiterator',
'shown_count',
'shown_first',
'shown_last',
'site_atbegin',
'site_id',
'site_name',
'site_restart',
'skiprecords_value',
'sleep',
'soap_convertpartstopairs',
'soap_definetag',
'soap_info',
'soap_lastrequest',
'soap_lastresponse',
'soap_stub',
'sort_args',
'sort_arguments',
'sort_columnitem',
'sort_fielditem',
'sort_orderitem',
'sortcolumnitem',
'sortfielditem',
'sortorderitem',
'sqlite_createdb',
'sqlite_session_driver',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'srand',
'stack',
'stock_quote',
'string',
'string_charfromname',
'string_concatenate',
'string_countfields',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_fordigit',
'string_getfield',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_lowercase',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_validcharset',
'table_name',
'table_realname',
'tag',
'tag_name',
'tags',
'tags_find',
'tags_list',
'tcp_close',
'tcp_open',
'tcp_send',
'tcp_tcp_close',
'tcp_tcp_open',
'tcp_tcp_send',
'thread_abort',
'thread_atomic',
'thread_event',
'thread_exists',
'thread_getcurrentid',
'thread_getpriority',
'thread_info',
'thread_list',
'thread_lock',
'thread_pipe',
'thread_priority_default',
'thread_priority_high',
'thread_priority_low',
'thread_rwlock',
'thread_semaphore',
'thread_setpriority',
'token_value',
'total_records',
'treemap',
'treemap_iterator',
'true',
'url_rewrite',
'valid_creditcard',
'valid_date',
'valid_email',
'valid_url',
'value_list',
'value_listitem',
'valuelistitem',
'var',
'var_defined',
'var_remove',
'var_reset',
'var_set',
'variable',
'variable_defined',
'variable_set',
'variables',
'variant_count',
'vars',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'while',
'wsdl_extract',
'wsdl_getbinding',
'wsdl_getbindingforoperation',
'wsdl_getbindingoperations',
'wsdl_getmessagenamed',
'wsdl_getmessageparts',
'wsdl_getmessagetriofromporttype',
'wsdl_getopbodystyle',
'wsdl_getopbodyuse',
'wsdl_getoperation',
'wsdl_getoplocation',
'wsdl_getopmessagetypes',
'wsdl_getopsoapaction',
'wsdl_getportaddress',
'wsdl_getportsforservice',
'wsdl_getporttype',
'wsdl_getporttypeoperation',
'wsdl_getservicedocumentation',
'wsdl_getservices',
'wsdl_gettargetnamespace',
'wsdl_issoapoperation',
'wsdl_listoperations',
'wsdl_maketest',
'xml',
'xml_extract',
'xml_rpc',
'xml_rpccall',
'xml_rw',
'xml_serve',
'xml_transform',
'xml_xml',
'xml_xmlstream',
'xmlstream',
'xsd_attribute',
'xsd_blankarraybase',
'xsd_blankbase',
'xsd_buildtype',
'xsd_cache',
'xsd_checkcardinality',
'xsd_continueall',
'xsd_continueannotation',
'xsd_continueany',
'xsd_continueanyattribute',
'xsd_continueattribute',
'xsd_continueattributegroup',
'xsd_continuechoice',
'xsd_continuecomplexcontent',
'xsd_continuecomplextype',
'xsd_continuedocumentation',
'xsd_continueextension',
'xsd_continuegroup',
'xsd_continuekey',
'xsd_continuelist',
'xsd_continuerestriction',
'xsd_continuesequence',
'xsd_continuesimplecontent',
'xsd_continuesimpletype',
'xsd_continueunion',
'xsd_deserialize',
'xsd_fullyqualifyname',
'xsd_generate',
'xsd_generateblankfromtype',
'xsd_generateblanksimpletype',
'xsd_generatetype',
'xsd_getschematype',
'xsd_issimpletype',
'xsd_loadschema',
'xsd_lookupnamespaceuri',
'xsd_lookuptype',
'xsd_processany',
'xsd_processattribute',
'xsd_processattributegroup',
'xsd_processcomplextype',
'xsd_processelement',
'xsd_processgroup',
'xsd_processimport',
'xsd_processinclude',
'xsd_processschema',
'xsd_processsimpletype',
'xsd_ref',
'xsd_type'
]
}
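# Names of methods invoked on Lasso object instances (member methods),
# grouped by category, as opposed to the unbound methods listed above.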
MEMBERS = {
'Member Methods': [
'escape_member',
'oncompare',
'sameas',
'isa',
'ascopy',
'asstring',
'ascopydeep',
'type',
'trait',
'parent',
'settrait',
'oncreate',
'listmethods',
'hasmethod',
'invoke',
'addtrait',
'isnota',
'isallof',
'isanyof',
'size',
'gettype',
'istype',
'doccomment',
'requires',
'provides',
'name',
'subtraits',
'description',
'hash',
'hosttonet16',
'hosttonet32',
'nettohost16',
'nettohost32',
'nettohost64',
'hosttonet64',
'bitset',
'bittest',
'bitflip',
'bitclear',
'bitor',
'bitand',
'bitxor',
'bitnot',
'bitshiftleft',
'bitshiftright',
'bytes',
'abs',
'div',
'dereferencepointer',
'asdecimal',
'serializationelements',
'acceptdeserializedelement',
'serialize',
'deg2rad',
'asstringhex',
'asstringoct',
'acos',
'asin',
'atan',
'atan2',
'ceil',
'cos',
'cosh',
'exp',
'fabs',
'floor',
'frexp',
'ldexp',
'log',
'log10',
'modf',
'pow',
'sin',
'sinh',
'sqrt',
'tan',
'tanh',
'erf',
'erfc',
'gamma',
'hypot',
'j0',
'j1',
'jn',
'lgamma',
'y0',
'y1',
'yn',
'isnan',
'acosh',
'asinh',
'atanh',
'cbrt',
'expm1',
'nextafter',
'scalb',
'ilogb',
'log1p',
'logb',
'remainder',
'rint',
'asinteger',
'self',
'detach',
'restart',
'resume',
'continuation',
'home',
'callsite_file',
'callsite_line',
'callsite_col',
'callstack',
'splitthread',
'threadreaddesc',
'givenblock',
'autocollectbuffer',
'calledname',
'methodname',
'invokeuntil',
'invokewhile',
'invokeautocollect',
'asasync',
'append',
'appendchar',
'private_find',
'private_findlast',
'length',
'chardigitvalue',
'private_compare',
'remove',
'charname',
'chartype',
'decompose',
'normalize',
'digit',
'foldcase',
'sub',
'integer',
'private_merge',
'unescape',
'trim',
'titlecase',
'reverse',
'getisocomment',
'getnumericvalue',
'totitle',
'toupper',
'tolower',
'lowercase',
'uppercase',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isxdigit',
'islower',
'isprint',
'isspace',
'istitle',
'ispunct',
'isgraph',
'isblank',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'encodehtml',
'decodehtml',
'encodexml',
'decodexml',
'encodehtmltoxml',
'getpropertyvalue',
'hasbinaryproperty',
'asbytes',
'find',
'findlast',
'contains',
'get',
'equals',
'compare',
'comparecodepointorder',
'padleading',
'padtrailing',
'merge',
'split',
'removeleading',
'removetrailing',
'beginswith',
'endswith',
'replace',
'values',
'foreachcharacter',
'foreachlinebreak',
'foreachwordbreak',
'eachwordbreak',
'eachcharacter',
'foreachmatch',
'eachmatch',
'encodesql92',
'encodesql',
'keys',
'decomposeassignment',
'firstcomponent',
'ifempty',
'eachsub',
'stripfirstcomponent',
'isnotempty',
'first',
'lastcomponent',
'foreachpathcomponent',
'isfullpath',
'back',
'second',
'componentdelimiter',
'isempty',
'foreachsub',
'front',
'striplastcomponent',
'eachcomponent',
'eachline',
'splitextension',
'hastrailingcomponent',
'last',
'ifnotempty',
'extensiondelimiter',
'eachword',
'substring',
'setsize',
'reserve',
'getrange',
'private_setrange',
'importas',
'import8bits',
'import32bits',
'import64bits',
'import16bits',
'importbytes',
'importpointer',
'export8bits',
'export16bits',
'export32bits',
'export64bits',
'exportbytes',
'exportsigned8bits',
'exportsigned16bits',
'exportsigned32bits',
'exportsigned64bits',
'marker',
'swapbytes',
'encodeurl',
'decodeurl',
'encodebase64',
'decodebase64',
'encodeqp',
'decodeqp',
'encodemd5',
'encodehex',
'decodehex',
'uncompress',
'compress',
'detectcharset',
'bestcharset',
'crc',
'importstring',
'setrange',
'exportas',
'exportstring',
'exportpointerbits',
'foreachbyte',
'eachbyte',
'setposition',
'position',
'value',
'join',
'asstaticarray',
'foreach',
'findposition',
'min',
'groupjoin',
'orderbydescending',
'average',
'take',
'do',
'selectmany',
'skip',
'select',
'sum',
'max',
'asarray',
'thenbydescending',
'aslist',
'orderby',
'thenby',
'where',
'groupby',
'asgenerator',
'typename',
'returntype',
'restname',
'paramdescs',
'action',
'statement',
'inputcolumns',
'keycolumns',
'returncolumns',
'sortcolumns',
'skiprows',
'maxrows',
'rowsfound',
'statementonly',
'lop',
'databasename',
'tablename',
'schemaname',
'hostid',
'hostdatasource',
'hostname',
'hostport',
'hostusername',
'hostpassword',
'hostschema',
'hosttableencoding',
'hostextra',
'hostisdynamic',
'refobj',
'connection',
'prepared',
'getset',
'addset',
'numsets',
'addrow',
'addcolumninfo',
'forcedrowid',
'makeinheritedcopy',
'filename',
'expose',
'recover',
'insert',
'removeall',
'count',
'exchange',
'findindex',
'foreachpair',
'foreachkey',
'sort',
'insertfirst',
'difference',
'removeback',
'insertback',
'removelast',
'removefront',
'insertfrom',
'intersection',
'top',
'insertlast',
'push',
'union',
'removefirst',
'insertfront',
'pop',
'fd',
'family',
'isvalid',
'isssl',
'open',
'close',
'read',
'write',
'ioctl',
'seek',
'mode',
'mtime',
'atime',
'dup',
'dup2',
'fchdir',
'fchown',
'fsync',
'ftruncate',
'fchmod',
'sendfd',
'receivefd',
'readobject',
'tryreadobject',
'writeobject',
'leaveopen',
'rewind',
'tell',
'language',
'script',
'country',
'variant',
'displaylanguage',
'displayscript',
'displaycountry',
'displayvariant',
'displayname',
'basename',
'keywords',
'iso3language',
'iso3country',
'formatas',
'formatnumber',
'parsenumber',
'parseas',
'format',
'parse',
'add',
'roll',
'set',
'getattr',
'setattr',
'clear',
'isset',
'settimezone',
'timezone',
'time',
'indaylighttime',
'createdocument',
'parsedocument',
'hasfeature',
'createdocumenttype',
'nodename',
'nodevalue',
'nodetype',
'parentnode',
'childnodes',
'firstchild',
'lastchild',
'previoussibling',
'nextsibling',
'attributes',
'ownerdocument',
'namespaceuri',
'prefix',
'localname',
'insertbefore',
'replacechild',
'removechild',
'appendchild',
'haschildnodes',
'clonenode',
'issupported',
'hasattributes',
'extract',
'extractone',
'extractfast',
'transform',
'foreachchild',
'eachchild',
'extractfastone',
'data',
'substringdata',
'appenddata',
'insertdata',
'deletedata',
'replacedata',
'doctype',
'implementation',
'documentelement',
'createelement',
'createdocumentfragment',
'createtextnode',
'createcomment',
'createcdatasection',
'createprocessinginstruction',
'createattribute',
'createentityreference',
'getelementsbytagname',
'importnode',
'createelementns',
'createattributens',
'getelementsbytagnamens',
'getelementbyid',
'tagname',
'getattribute',
'setattribute',
'removeattribute',
'getattributenode',
'setattributenode',
'removeattributenode',
'getattributens',
'setattributens',
'removeattributens',
'getattributenodens',
'setattributenodens',
'hasattribute',
'hasattributens',
'setname',
'contents',
'specified',
'ownerelement',
'splittext',
'notationname',
'publicid',
'systemid',
'target',
'entities',
'notations',
'internalsubset',
'item',
'getnameditem',
'getnameditemns',
'setnameditem',
'setnameditemns',
'removenameditem',
'removenameditemns',
'askeyedgenerator',
'eachpair',
'eachkey',
'next',
'readstring',
'readattributevalue',
'attributecount',
'baseuri',
'depth',
'hasvalue',
'isemptyelement',
'xmllang',
'getattributenamespace',
'lookupnamespace',
'movetoattribute',
'movetoattributenamespace',
'movetofirstattribute',
'movetonextattribute',
'movetoelement',
'prepare',
'last_insert_rowid',
'total_changes',
'interrupt',
'errcode',
'errmsg',
'addmathfunctions',
'finalize',
'step',
'bind_blob',
'bind_double',
'bind_int',
'bind_null',
'bind_text',
'bind_parameter_index',
'reset',
'column_count',
'column_name',
'column_decltype',
'column_blob',
'column_double',
'column_int64',
'column_text',
'column_type',
'ismultipart',
'gotfileupload',
'setmaxfilesize',
'getparts',
'trackingid',
'currentfile',
'addtobuffer',
'input',
'replacepattern',
'findpattern',
'ignorecase',
'setinput',
'setreplacepattern',
'setfindpattern',
'setignorecase',
'output',
'appendreplacement',
'matches',
'private_replaceall',
'appendtail',
'groupcount',
'matchposition',
'matchesstart',
'private_replacefirst',
'private_split',
'matchstring',
'replaceall',
'replacefirst',
'findall',
'findcount',
'findfirst',
'findsymbols',
'loadlibrary',
'getlibrary',
'atend',
'f',
'r',
'form',
'gen',
'callfirst',
'key',
'by',
'from',
'init',
'to',
'd',
't',
'object',
'inneroncompare',
'members',
'writeid',
'addmember',
'refid',
'index',
'objects',
'tabs',
'trunk',
'trace',
'asxml',
'tabstr',
'toxmlstring',
'document',
'idmap',
'readidobjects',
'left',
'right',
'up',
'red',
'root',
'getnode',
'firstnode',
'lastnode',
'nextnode',
'private_rebalanceforremove',
'private_rotateleft',
'private_rotateright',
'private_rebalanceforinsert',
'eachnode',
'foreachnode',
'encoding',
'resolvelinks',
'readbytesfully',
'dowithclose',
'readsomebytes',
'readbytes',
'writestring',
'parentdir',
'aslazystring',
'path',
'openread',
'openwrite',
'openwriteonly',
'openappend',
'opentruncate',
'writebytes',
'exists',
'modificationtime',
'lastaccesstime',
'modificationdate',
'lastaccessdate',
'delete',
'moveto',
'copyto',
'linkto',
'flush',
'chmod',
'chown',
'isopen',
'setmarker',
'setmode',
'foreachline',
'lock',
'unlock',
'trylock',
'testlock',
'perms',
'islink',
'isdir',
'realpath',
'openwith',
'asraw',
'rawdiff',
'getformat',
'setformat',
'subtract',
'gmt',
'dst',
'era',
'year',
'month',
'week',
'weekofyear',
'weekofmonth',
'day',
'dayofmonth',
'dayofyear',
'dayofweek',
'dayofweekinmonth',
'ampm',
'am',
'pm',
'hour',
'hourofday',
'hourofampm',
'minute',
'millisecond',
'zoneoffset',
'dstoffset',
'yearwoy',
'dowlocal',
'extendedyear',
'julianday',
'millisecondsinday',
'firstdayofweek',
'fixformat',
'minutesbetween',
'hoursbetween',
'secondsbetween',
'daysbetween',
'businessdaysbetween',
'pdifference',
'getfield',
'create',
'setcwd',
'foreachentry',
'eachpath',
'eachfilepath',
'eachdirpath',
'each',
'eachfile',
'eachdir',
'eachpathrecursive',
'eachfilepathrecursive',
'eachdirpathrecursive',
'eachentry',
'makefullpath',
'annotate',
'blur',
'command',
'composite',
'contrast',
'convert',
'crop',
'execute',
'enhance',
'flipv',
'fliph',
'modulate',
'rotate',
'save',
'scale',
'sharpen',
'addcomment',
'comments',
'describe',
'file',
'height',
'pixel',
'resolutionv',
'resolutionh',
'width',
'setcolorspace',
'colorspace',
'debug',
'histogram',
'imgptr',
'appendimagetolist',
'fx',
'applyheatcolors',
'authenticate',
'search',
'searchurl',
'readerror',
'readline',
'setencoding',
'closewrite',
'exitcode',
'getversion',
'findclass',
'throw',
'thrownew',
'exceptionoccurred',
'exceptiondescribe',
'exceptionclear',
'fatalerror',
'newglobalref',
'deleteglobalref',
'deletelocalref',
'issameobject',
'allocobject',
'newobject',
'getobjectclass',
'isinstanceof',
'getmethodid',
'callobjectmethod',
'callbooleanmethod',
'callbytemethod',
'callcharmethod',
'callshortmethod',
'callintmethod',
'calllongmethod',
'callfloatmethod',
'calldoublemethod',
'callvoidmethod',
'callnonvirtualobjectmethod',
'callnonvirtualbooleanmethod',
'callnonvirtualbytemethod',
'callnonvirtualcharmethod',
'callnonvirtualshortmethod',
'callnonvirtualintmethod',
'callnonvirtuallongmethod',
'callnonvirtualfloatmethod',
'callnonvirtualdoublemethod',
'callnonvirtualvoidmethod',
'getfieldid',
'getobjectfield',
'getbooleanfield',
'getbytefield',
'getcharfield',
'getshortfield',
'getintfield',
'getlongfield',
'getfloatfield',
'getdoublefield',
'setobjectfield',
'setbooleanfield',
'setbytefield',
'setcharfield',
'setshortfield',
'setintfield',
'setlongfield',
'setfloatfield',
'setdoublefield',
'getstaticmethodid',
'callstaticobjectmethod',
'callstaticbooleanmethod',
'callstaticbytemethod',
'callstaticcharmethod',
'callstaticshortmethod',
'callstaticintmethod',
'callstaticlongmethod',
'callstaticfloatmethod',
'callstaticdoublemethod',
'callstaticvoidmethod',
'getstaticfieldid',
'getstaticobjectfield',
'getstaticbooleanfield',
'getstaticbytefield',
'getstaticcharfield',
'getstaticshortfield',
'getstaticintfield',
'getstaticlongfield',
'getstaticfloatfield',
'getstaticdoublefield',
'setstaticobjectfield',
'setstaticbooleanfield',
'setstaticbytefield',
'setstaticcharfield',
'setstaticshortfield',
'setstaticintfield',
'setstaticlongfield',
'setstaticfloatfield',
'setstaticdoublefield',
'newstring',
'getstringlength',
'getstringchars',
'getarraylength',
'newobjectarray',
'getobjectarrayelement',
'setobjectarrayelement',
'newbooleanarray',
'newbytearray',
'newchararray',
'newshortarray',
'newintarray',
'newlongarray',
'newfloatarray',
'newdoublearray',
'getbooleanarrayelements',
'getbytearrayelements',
'getchararrayelements',
'getshortarrayelements',
'getintarrayelements',
'getlongarrayelements',
'getfloatarrayelements',
'getdoublearrayelements',
'getbooleanarrayregion',
'getbytearrayregion',
'getchararrayregion',
'getshortarrayregion',
'getintarrayregion',
'getlongarrayregion',
'getfloatarrayregion',
'getdoublearrayregion',
'setbooleanarrayregion',
'setbytearrayregion',
'setchararrayregion',
'setshortarrayregion',
'setintarrayregion',
'setlongarrayregion',
'setfloatarrayregion',
'setdoublearrayregion',
'monitorenter',
'monitorexit',
'fromreflectedmethod',
'fromreflectedfield',
'toreflectedmethod',
'toreflectedfield',
'exceptioncheck',
'dbtablestable',
'dstable',
'dsdbtable',
'dshoststable',
'fieldstable',
'sql',
'adddatasource',
'loaddatasourceinfo',
'loaddatasourcehostinfo',
'getdatasource',
'getdatasourceid',
'getdatasourcename',
'listdatasources',
'listactivedatasources',
'removedatasource',
'listdatasourcehosts',
'listhosts',
'adddatasourcehost',
'getdatasourcehost',
'removedatasourcehost',
'getdatabasehost',
'gethostdatabase',
'listalldatabases',
'listdatasourcedatabases',
'listhostdatabases',
'getdatasourcedatabase',
'getdatasourcedatabasebyid',
'getdatabasebyname',
'getdatabasebyid',
'getdatabasebyalias',
'adddatasourcedatabase',
'removedatasourcedatabase',
'listalltables',
'listdatabasetables',
'getdatabasetable',
'getdatabasetablebyalias',
'getdatabasetablebyid',
'gettablebyid',
'adddatabasetable',
'removedatabasetable',
'removefield',
'maybevalue',
'getuniquealiasname',
'makecolumnlist',
'makecolumnmap',
'datasourcecolumns',
'datasourcemap',
'hostcolumns',
'hostmap',
'hostcolumns2',
'hostmap2',
'databasecolumns',
'databasemap',
'tablecolumns',
'tablemap',
'databasecolumnnames',
'hostcolumnnames',
'hostcolumnnames2',
'datasourcecolumnnames',
'tablecolumnnames',
'bindcount',
'sqlite3',
'db',
'tables',
'hastable',
'tablehascolumn',
'eachrow',
'bindparam',
'foreachrow',
'executelazy',
'executenow',
'lastinsertid',
'table',
'bindone',
'src',
'stat',
'colmap',
'getcolumn',
'locals',
'getcolumns',
'bodybytes',
'headerbytes',
'ready',
'token',
'url',
'done',
'header',
'result',
'statuscode',
'raw',
'version',
'perform',
'performonce',
's',
'linediffers',
'sourcefile',
'sourceline',
'sourcecolumn',
'continuationpacket',
'continuationpoint',
'continuationstack',
'features',
'lastpoint',
'net',
'running',
'source',
'run',
'pathtouri',
'sendpacket',
'readpacket',
'handlefeatureset',
'handlefeatureget',
'handlestdin',
'handlestdout',
'handlestderr',
'isfirststep',
'handlecontinuation',
'ensurestopped',
'handlestackget',
'handlecontextnames',
'formatcontextelements',
'formatcontextelement',
'bptypetostr',
'bptoxml',
'handlebreakpointlist',
'handlebreakpointget',
'handlebreakpointremove',
'condtoint',
'inttocond',
'handlebreakpointupdate',
'handlebreakpointset',
'handlecontextget',
'handlesource',
'error',
'setstatus',
'getstatus',
'stoprunning',
'pollide',
'polldbg',
'runonce',
'arguments',
'id',
'argumentvalue',
'end',
'start',
'days',
'foreachday',
'padzero',
'actionparams',
'capi',
'doclose',
'dsinfo',
'isnothing',
'named',
'workinginputcolumns',
'workingkeycolumns',
'workingreturncolumns',
'workingsortcolumns',
'workingkeyfield_name',
'scanfordatasource',
'configureds',
'configuredskeys',
'scrubkeywords',
'closeprepared',
'filterinputcolumn',
'prev',
'head',
'removenode',
'listnode',
'bind',
'listen',
'remoteaddress',
'shutdownrdwr',
'shutdownwr',
'shutdownrd',
'localaddress',
'accept',
'connect',
'foreachaccept',
'writeobjecttcp',
'readobjecttcp',
'begintls',
'endtls',
'loadcerts',
'sslerrfail',
'fromname',
'fromport',
'env',
'checked',
'getclass',
'jobjectisa',
'new',
'callvoid',
'callint',
'callfloat',
'callboolean',
'callobject',
'callstring',
'callstaticobject',
'callstaticstring',
'callstaticint',
'callstaticboolean',
'chk',
'makecolor',
'realdoc',
'addbarcode',
'addchapter',
'addcheckbox',
'addcombobox',
'addhiddenfield',
'addimage',
'addlist',
'addpage',
'addparagraph',
'addpasswordfield',
'addphrase',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsubmitbutton',
'addtable',
'addtextarea',
'addtextfield',
'addtext',
'arc',
'circle',
'closepath',
'curveto',
'drawtext',
'getcolor',
'getheader',
'getheaders',
'getmargins',
'getpagenumber',
'getsize',
'insertpage',
'line',
'rect',
'setcolor',
'setfont',
'setlinewidth',
'setpagenumber',
'conventionaltop',
'lowagiefont',
'jcolor',
'jbarcode',
'generatechecksum',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getcode',
'getfont',
'gettextalignment',
'gettextsize',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setcode',
'setgeneratechecksum',
'setshowchecksum',
'settextalignment',
'settextsize',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'jfont',
'getencoding',
'getface',
'getfullfontname',
'getpsfontname',
'getsupportedencodings',
'istruetype',
'getstyle',
'getbold',
'getitalic',
'getunderline',
'setface',
'setunderline',
'setbold',
'setitalic',
'textwidth',
'jimage',
'ontop',
'jlist',
'jread',
'addjavascript',
'exportfdf',
'extractimage',
'fieldnames',
'fieldposition',
'fieldtype',
'fieldvalue',
'gettext',
'importfdf',
'javascript',
'pagecount',
'pagerotation',
'pagesize',
'setfieldvalue',
'setpagerange',
'jtable',
'getabswidth',
'getalignment',
'getbordercolor',
'getborderwidth',
'getcolumncount',
'getpadding',
'getrowcount',
'getspacing',
'setalignment',
'setbordercolor',
'setborderwidth',
'setpadding',
'setspacing',
'jtext',
'element',
'foreachspool',
'unspool',
'err',
'in',
'out',
'pid',
'wait',
'testexitcode',
'maxworkers',
'tasks',
'workers',
'startone',
'addtask',
'waitforcompletion',
'isidle',
'scanworkers',
'scantasks',
'z',
'addfile',
'adddir',
'adddirpath',
'foreachfile',
'foreachfilename',
'eachfilename',
'filenames',
'getfile',
'meta',
'criteria',
'map',
'valid',
'lazyvalue',
'dns_response',
'qdcount',
'qdarray',
'answer',
'bitformat',
'consume_rdata',
'consume_string',
'consume_label',
'consume_domain',
'consume_message',
'errors',
'warnings',
'addwarning',
'adderror',
'renderbytes',
'renderstring',
'components',
'addcomponent',
'addcomponents',
'body',
'renderdocumentbytes',
'contenttype',
'mime_boundary',
'mime_contenttype',
'mime_hdrs',
'addtextpart',
'addhtmlpart',
'addattachment',
'addpart',
'recipients',
'pop_capa',
'pop_debug',
'pop_err',
'pop_get',
'pop_ids',
'pop_index',
'pop_log',
'pop_mode',
'pop_net',
'pop_res',
'pop_server',
'pop_timeout',
'pop_token',
'pop_cmd',
'user',
'pass',
'apop',
'auth',
'quit',
'rset',
'list',
'uidl',
'retr',
'dele',
'noop',
'capa',
'stls',
'authorize',
'retrieve',
'headers',
'uniqueid',
'capabilities',
'cancel',
'results',
'lasterror',
'parse_body',
'parse_boundary',
'parse_charset',
'parse_content_disposition',
'parse_content_transfer_encoding',
'parse_content_type',
'parse_hdrs',
'parse_mode',
'parse_msg',
'parse_parts',
'parse_rawhdrs',
'rawheaders',
'content_type',
'content_transfer_encoding',
'content_disposition',
'boundary',
'charset',
'cc',
'subject',
'bcc',
'date',
'pause',
'continue',
'touch',
'refresh',
'queue',
'status',
'queue_status',
'active_tick',
'getprefs',
'initialize',
'queue_maintenance',
'queue_messages',
'content',
'rectype',
'requestid',
'cachedappprefix',
'cachedroot',
'cookiesary',
'fcgireq',
'fileuploadsary',
'headersmap',
'httpauthorization',
'postparamsary',
'queryparamsary',
'documentroot',
'appprefix',
'httpconnection',
'httpcookie',
'httphost',
'httpuseragent',
'httpcachecontrol',
'httpreferer',
'httpreferrer',
'contentlength',
'pathtranslated',
'remoteaddr',
'remoteport',
'requestmethod',
'requesturi',
'scriptfilename',
'scriptname',
'scripturi',
'scripturl',
'serveraddr',
'serveradmin',
'servername',
'serverport',
'serverprotocol',
'serversignature',
'serversoftware',
'pathinfo',
'gatewayinterface',
'httpaccept',
'httpacceptencoding',
'httpacceptlanguage',
'ishttps',
'cookies',
'cookie',
'rawheader',
'queryparam',
'postparam',
'param',
'queryparams',
'querystring',
'postparams',
'poststring',
'params',
'fileuploads',
'isxhr',
'reqid',
'statusmsg',
'requestparams',
'stdin',
'mimes',
'writeheaderline',
'writeheaderbytes',
'writebodybytes',
'cap',
'n',
'proxying',
'stop',
'printsimplemsg',
'handleevalexpired',
'handlenormalconnection',
'handledevconnection',
'splittoprivatedev',
'getmode',
'curl',
'novaluelists',
'makeurl',
'choosecolumntype',
'getdatabasetablepart',
'getlcapitype',
'buildquery',
'getsortfieldspart',
'endjs',
'title',
'addjs',
'addjstext',
'addendjs',
'addendjstext',
'addcss',
'addfavicon',
'attrs',
'dtdid',
'lang',
'xhtml',
'style',
'gethtmlattr',
'hashtmlattr',
'onmouseover',
'onkeydown',
'dir',
'onclick',
'onkeypress',
'onmouseout',
'onkeyup',
'onmousemove',
'onmouseup',
'ondblclick',
'onmousedown',
'sethtmlattr',
'class',
'gethtmlattrstring',
'tag',
'code',
'msg',
'scripttype',
'defer',
'httpequiv',
'scheme',
'href',
'hreflang',
'linktype',
'rel',
'rev',
'media',
'declare',
'classid',
'codebase',
'objecttype',
'codetype',
'archive',
'standby',
'usemap',
'tabindex',
'styletype',
'method',
'enctype',
'accept_charset',
'onsubmit',
'onreset',
'accesskey',
'inputtype',
'maxlength',
'for',
'selected',
'label',
'multiple',
'buff',
'wroteheaders',
'pullrequest',
'pullrawpost',
'shouldclose',
'pullurlpost',
'pullmimepost',
'pullhttpheader',
'pulloneheaderline',
'parseoneheaderline',
'addoneheaderline',
'safeexport8bits',
'writeheader',
'fail',
'connhandler',
'port',
'connectionhandler',
'acceptconnections',
'gotconnection',
'failnoconnectionhandler',
'splitconnection',
'scriptextensions',
'sendfile',
'probemimetype',
'appname',
'inits',
'installs',
'rootmap',
'install',
'getappsource',
'preflight',
'splituppath',
'handleresource',
'handledefinitionhead',
'handledefinitionbody',
'handledefinitionresource',
'execinstalls',
'execinits',
'payload',
'fullpath',
'resourcename',
'issourcefile',
'resourceinvokable',
'srcpath',
'resources',
'eligiblepath',
'eligiblepaths',
'expiresminutes',
'moddatestr',
'zips',
'addzip',
'getzipfilebytes',
'resourcedata',
'zip',
'zipfile',
'zipname',
'zipfilename',
'rawinvokable',
'route',
'setdestination',
'getprowcount',
'encodepassword',
'checkuser',
'needinitialization',
'adduser',
'getuserid',
'getuser',
'getuserbykey',
'removeuser',
'listusers',
'listusersbygroup',
'countusersbygroup',
'addgroup',
'updategroup',
'getgroupid',
'getgroup',
'removegroup',
'listgroups',
'listgroupsbyuser',
'addusertogroup',
'removeuserfromgroup',
'removeuserfromallgroups',
'md5hex',
'usercolumns',
'groupcolumns',
'expireminutes',
'lasttouched',
'hasexpired',
'idealinmemory',
'maxinmemory',
'nextprune',
'nextprunedelta',
'sessionsdump',
'startup',
'validatesessionstable',
'createtable',
'fetchdata',
'savedata',
'kill',
'expire',
'prune',
'entry',
'host',
'tb',
'setdefaultstorage',
'getdefaultstorage',
'onconvert',
'send',
'nodelist',
'delim',
'subnode',
'subnodes',
'addsubnode',
'removesubnode',
'nodeforpath',
'representnoderesult',
'mime',
'extensions',
'representnode',
'jsonfornode',
'defaultcontentrepresentation',
'supportscontentrepresentation',
'htmlcontent',
'appmessage',
'appstatus',
'atends',
'chunked',
'cookiesarray',
'didinclude',
'errstack',
'headersarray',
'includestack',
'outputencoding',
'sessionsmap',
'htmlizestacktrace',
'includes',
'respond',
'sendresponse',
'sendchunk',
'makecookieyumyum',
'getinclude',
'include',
'includeonce',
'includelibrary',
'includelibraryonce',
'includebytes',
'addatend',
'setcookie',
'addheader',
'replaceheader',
'setheaders',
'rawcontent',
'redirectto',
'htmlizestacktracelink',
'doatbegins',
'handlelassoappcontent',
'handlelassoappresponse',
'domainbody',
'establisherrorstate',
'tryfinderrorfile',
'doatends',
'dosessions',
'makenonrelative',
'pushinclude',
'popinclude',
'findinclude',
'checkdebugging',
'splitdebuggingthread',
'matchtriggers',
'rules',
'shouldabort',
'gettrigger',
'trigger',
'rule',
'foo',
'jsonlabel',
'jsonhtml',
'jsonisleaf',
'acceptpost',
'csscontent',
'jscontent'
],
'Lasso 8 Member Tags': [
'accept',
'add',
'addattachment',
'addattribute',
'addbarcode',
'addchapter',
'addcheckbox',
'addchild',
'addcombobox',
'addcomment',
'addcontent',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addlist',
'addnamespace',
'addnextsibling',
'addpage',
'addparagraph',
'addparenttype',
'addpart',
'addpasswordfield',
'addphrase',
'addprevsibling',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsibling',
'addsubmitbutton',
'addtable',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'alarms',
'annotate',
'answer',
'append',
'appendreplacement',
'appendtail',
'arc',
'asasync',
'astype',
'atbegin',
'atbottom',
'atend',
'atfarleft',
'atfarright',
'attop',
'attributecount',
'attributes',
'authenticate',
'authorize',
'backward',
'baseuri',
'bcc',
'beanproperties',
'beginswith',
'bind',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'boundary',
'bytes',
'call',
'cancel',
'capabilities',
'cc',
'chardigitvalue',
'charname',
'charset',
'chartype',
'children',
'circle',
'close',
'closepath',
'closewrite',
'code',
'colorspace',
'command',
'comments',
'compare',
'comparecodepointorder',
'compile',
'composite',
'connect',
'contains',
'content_disposition',
'content_transfer_encoding',
'content_type',
'contents',
'contrast',
'convert',
'crop',
'curveto',
'data',
'date',
'day',
'daylights',
'dayofweek',
'dayofyear',
'decrement',
'delete',
'depth',
'describe',
'description',
'deserialize',
'detach',
'detachreference',
'difference',
'digit',
'document',
'down',
'drawtext',
'dst',
'dump',
'endswith',
'enhance',
'equals',
'errors',
'eval',
'events',
'execute',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportfdf',
'exportstring',
'extract',
'extractone',
'fieldnames',
'fieldtype',
'fieldvalue',
'file',
'find',
'findindex',
'findnamespace',
'findnamespacebyhref',
'findpattern',
'findposition',
'first',
'firstchild',
'fliph',
'flipv',
'flush',
'foldcase',
'foreach',
'format',
'forward',
'freebusies',
'freezetype',
'freezevalue',
'from',
'fulltype',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getattribute',
'getattributenamespace',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbordercolor',
'getborderwidth',
'getcode',
'getcolor',
'getcolumncount',
'getencoding',
'getface',
'getfont',
'getformat',
'getfullfontname',
'getheaders',
'getmargins',
'getmethod',
'getnumericvalue',
'getpadding',
'getpagenumber',
'getparams',
'getproperty',
'getpsfontname',
'getrange',
'getrowcount',
'getsize',
'getspacing',
'getsupportedencodings',
'gettextalignment',
'gettextsize',
'gettype',
'gmt',
'groupcount',
'hasattribute',
'haschildren',
'hasvalue',
'header',
'headers',
'height',
'histogram',
'hosttonet16',
'hosttonet32',
'hour',
'id',
'ignorecase',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importfdf',
'importstring',
'increment',
'input',
'insert',
'insertatcurrent',
'insertfirst',
'insertfrom',
'insertlast',
'insertpage',
'integer',
'intersection',
'invoke',
'isa',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isemptyelement',
'islower',
'isopen',
'isprint',
'isspace',
'istitle',
'istruetype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'iterator',
'javascript',
'join',
'journals',
'key',
'keys',
'last',
'lastchild',
'lasterror',
'left',
'length',
'line',
'listen',
'localaddress',
'localname',
'lock',
'lookupnamespace',
'lowercase',
'marker',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'merge',
'millisecond',
'minute',
'mode',
'modulate',
'month',
'moveto',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'name',
'namespaces',
'namespaceuri',
'nettohost16',
'nettohost32',
'newchild',
'next',
'nextsibling',
'nodetype',
'open',
'output',
'padleading',
'padtrailing',
'pagecount',
'pagesize',
'paraminfo',
'params',
'parent',
'path',
'pixel',
'position',
'prefix',
'previoussibling',
'properties',
'rawheaders',
'read',
'readattributevalue',
'readerror',
'readfrom',
'readline',
'readlock',
'readstring',
'readunlock',
'recipients',
'rect',
'refcount',
'referrals',
'remoteaddress',
'remove',
'removeall',
'removeattribute',
'removechild',
'removecurrent',
'removefirst',
'removelast',
'removeleading',
'removenamespace',
'removetrailing',
'render',
'replace',
'replaceall',
'replacefirst',
'replacepattern',
'replacewith',
'reserve',
'reset',
'resolutionh',
'resolutionv',
'response',
'results',
'retrieve',
'returntype',
'reverse',
'reverseiterator',
'right',
'rotate',
'run',
'save',
'scale',
'search',
'second',
'send',
'serialize',
'set',
'setalignment',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setblocking',
'setbordercolor',
'setborderwidth',
'setbytes',
'setcode',
'setcolor',
'setcolorspace',
'setdatatype',
'setencoding',
'setface',
'setfieldvalue',
'setfont',
'setformat',
'setgeneratechecksum',
'setheight',
'setlassodata',
'setlinewidth',
'setmarker',
'setmode',
'setname',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setproperty',
'setrange',
'setshowchecksum',
'setsize',
'setspacing',
'settemplate',
'settemplatestr',
'settextalignment',
'settextdata',
'settextsize',
'settype',
'setunderline',
'setwidth',
'setxmldata',
'sharpen',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'signal',
'signalall',
'size',
'smooth',
'sort',
'sortwith',
'split',
'standards',
'steal',
'subject',
'substring',
'subtract',
'swapbytes',
'textwidth',
'time',
'timezones',
'titlecase',
'to',
'todos',
'tolower',
'totitle',
'toupper',
'transform',
'trim',
'type',
'unescape',
'union',
'uniqueid',
'unlock',
'unserialize',
'up',
'uppercase',
'value',
'values',
'valuetype',
'wait',
'waskeyword',
'week',
'width',
'write',
'writelock',
'writeto',
'writeunlock',
'xmllang',
'xmlschematype',
'year'
]
}
| mit | -3,760,406,231,728,194,000 | 24.265855 | 70 | 0.493369 | false |
sbuss/voteswap | lib/django/templatetags/l10n.py | 337 | 1735 | from django.template import Library, Node, TemplateSyntaxError
from django.utils import formats
from django.utils.encoding import force_text
register = Library()
@register.filter(is_safe=False)
def localize(value):
"""
Forces a value to be rendered as a localized value,
regardless of the value of ``settings.USE_L10N``.
"""
return force_text(formats.localize(value, use_l10n=True))
@register.filter(is_safe=False)
def unlocalize(value):
"""
Forces a value to be rendered as a non-localized value,
regardless of the value of ``settings.USE_L10N``.
"""
return force_text(value)
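# A minimal, illustrative template snippet using the two filters above
# (assumes this tag library is loaded under its file name, ``l10n``):
#
#   {% load l10n %}
#   {{ value|localize }}    {# always rendered with locale formatting #}
#   {{ value|unlocalize }}  {# always rendered without locale formatting #}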
class LocalizeNode(Node):
def __init__(self, nodelist, use_l10n):
self.nodelist = nodelist
self.use_l10n = use_l10n
def __repr__(self):
return "<LocalizeNode>"
def render(self, context):
old_setting = context.use_l10n
context.use_l10n = self.use_l10n
output = self.nodelist.render(context)
context.use_l10n = old_setting
return output
@register.tag('localize')
def localize_tag(parser, token):
"""
Forces or prevents localization of values, regardless of the value of
`settings.USE_L10N`.
Sample usage::
{% localize off %}
var pi = {{ 3.1415 }};
{% endlocalize %}
"""
use_l10n = None
bits = list(token.split_contents())
if len(bits) == 1:
use_l10n = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
else:
use_l10n = bits[1] == 'on'
nodelist = parser.parse(('endlocalize',))
parser.delete_first_token()
return LocalizeNode(nodelist, use_l10n)
| mit | 2,824,667,929,690,543,600 | 26.109375 | 82 | 0.632277 | false |
PyBorg/PyBorg | pyborg-msnp.py | 3 | 4812 | #! /usr/bin/env python
#
# PyBorg MSN module
#
# Copyright (c) 2006 Sebastien Dailly
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import time
import sys
import pyborg
import cfgfile
import traceback
import thread
try:
import msnp
except:
print "ERROR !!!!\msnp not found, please install it ( http://msnp.sourceforge.net/ )"
sys.exit(1)
def get_time():
"""
Return time as a nice yummy string
"""
return time.strftime("%H:%M:%S", time.localtime(time.time()))
class ModMSN(msnp.Session, msnp.ChatCallbacks):
def __init__(self, my_pyborg, args):
"""
Args will be sys.argv (command prompt arguments)
"""
# PyBorg
self.pyborg = my_pyborg
# load settings
self.settings = cfgfile.cfgset()
self.settings.load("pyborg-msn.cfg",
{ "myname": ("The bot's nickname", "PyBorg"),
"msn_passport": ("Reported passport account", "[email protected]"),
"msn_password": ("Reported password account", "password"),
"owners": ("Owner(s) passport account", [ "[email protected]" ]),
"password": ("password for control the bot (Edit manually !)", "")
} )
self.owners = self.settings.owners[:]
def our_start(self):
print "Connecting to msn..."
msnp.Session.__init__(self, self.MsnListener(self))
self.login(self.settings.msn_passport, self.settings.msn_password)
if self.logged_in: print "connected"
self.sync_friend_list()
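        # Main loop: pump pending MSN chat events roughly once per second.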
while True:
bot.process(chats = True)
time.sleep(1)
class MsnListener(msnp.SessionCallbacks):
def __init__(self, bot):
self.bot = bot
def chat_started(self, chat):
callbacks = ModMSN.MsnChatActions(bot)
chat.callbacks = callbacks
callbacks.chat = chat
class MsnChatActions(msnp.ChatCallbacks):
# Command list for this module
commandlist = "MSN Module Commands:\n!nick, !owner"
# Detailed command description dictionary
commanddict = {
"nick": "Owner command. Usage: !nick nickname\nChange nickname",
"quit": "Owner command. Usage: !quit\nMake the bot quit IRC",
"owner": "Usage: !owner password\nAllow to become owner of the bot"
}
def __init__(self, bot):
self.bot = bot
def message_received(self, passport_id, display_name, text, charset):
print '%s: %s' % (passport_id, text)
if text[0] == '!':
if self.msn_command(passport_id, display_name, text, charset) == 1:
return
self.chat.send_typing()
if passport_id in bot.owners:
bot.pyborg.process_msg(self, text, 100, 1, (charset, display_name, text), owner=1)
else:
thread.start_new_thread(bot.pyborg.process_msg, (self, text, 100, 1, (charset, display_name, text)))
def msn_command(self, passport_id, display_name, text, charset):
command_list = text.split()
command_list[0] = command_list[0].lower()
if command_list[0] == "!owner" and len(command_list) > 1 and passport_id not in bot.owners:
if command_list[1] == bot.settings.password:
bot.owners.append(passport_id)
self.output("You've been added to owners list", (charset, display_name, text))
else:
self.output("try again", (charset))
if passport_id in bot.owners:
if command_list[0] == '!nick' and len(command_list) > 1:
bot.change_display_name(command_list[1])
def output(self, message, args):
charset, display_name, text = args
message = message.replace("#nick", display_name)
print "[%s] <%s> > %s> %s" % ( get_time(), display_name, bot.display_name, text)
print "[%s] <%s> > %s> %s" % ( get_time(), bot.display_name, display_name, message)
self.chat.send_message(message, charset)
if __name__ == "__main__":
if "--help" in sys.argv:
print "Pyborg msn bot. Usage:"
print " pyborg-msn.py"
print "Defaults stored in pyborg-msn.cfg"
print
sys.exit(0)
# start the pyborg
my_pyborg = pyborg.pyborg()
bot = ModMSN(my_pyborg, sys.argv)
try:
bot.our_start()
except KeyboardInterrupt, e:
pass
except SystemExit, e:
pass
except:
traceback.print_exc()
c = raw_input("Ooops! It looks like Pyborg has crashed. Would you like to save its dictionary? (y/n) ")
if c.lower()[:1] == 'n':
sys.exit(0)
bot.logout()
my_pyborg.save_all()
del my_pyborg
| gpl-2.0 | 1,246,532,129,342,823,000 | 27.642857 | 105 | 0.672901 | false |
zhangxiaolins/python_base | tests/test_notifier.py | 2 | 12210 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import mock
from essential import context
from essential.fixture import config
from essential.fixture import moxstubout
from essential import log
from essential.notifier import api as notifier_api
from essential.notifier import log_notifier
from essential.notifier import no_op_notifier
from essential.notifier import proxy
from essential import rpc
from essential import test
ctxt = context.get_admin_context()
ctxt2 = context.get_admin_context()
class NotifierTestCase(test.BaseTestCase):
"""Test case for notifications."""
def setUp(self):
super(NotifierTestCase, self).setUp()
notification_driver = [
'essential.notifier.no_op_notifier'
]
self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
self.config = self.useFixture(config.Config()).config
self.CONF = self.useFixture(config.Config()).conf
self.config(notification_driver=notification_driver)
self.config(default_publisher_id='publisher')
self.addCleanup(notifier_api._reset_drivers)
def test_send_notification(self):
self.notify_called = False
def mock_notify(cls, *args):
self.notify_called = True
self.stubs.Set(no_op_notifier, 'notify',
mock_notify)
notifier_api.notify(ctxt, 'publisher_id', 'event_type',
notifier_api.WARN, dict(a=3))
self.assertEqual(self.notify_called, True)
def test_verify_message_format(self):
"""A test to ensure changing the message format is prohibitively
annoying.
"""
def message_assert(context, message):
fields = [('publisher_id', 'publisher_id'),
('event_type', 'event_type'),
('priority', 'WARN'),
('payload', dict(a=3))]
for k, v in fields:
self.assertEqual(message[k], v)
self.assertTrue(len(message['message_id']) > 0)
self.assertTrue(len(message['timestamp']) > 0)
self.assertEqual(context, ctxt)
self.stubs.Set(no_op_notifier, 'notify',
message_assert)
notifier_api.notify(ctxt, 'publisher_id', 'event_type',
notifier_api.WARN, dict(a=3))
def _test_rpc_notify(self, driver, envelope=False):
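        # Helper shared by the two driver tests below: rpc_notifier2 is
        # expected to pass envelope=True to rpc.notify(), rpc_notifier is not.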
self.stubs.Set(self.CONF, 'notification_driver', [driver])
self.mock_notify = False
self.envelope = False
def mock_notify(cls, *args, **kwargs):
self.mock_notify = True
self.envelope = kwargs.get('envelope', False)
self.stubs.Set(rpc, 'notify', mock_notify)
notifier_api.notify(ctxt, 'publisher_id', 'event_type',
notifier_api.WARN, dict(a=3))
self.assertEqual(self.mock_notify, True)
self.assertEqual(self.envelope, envelope)
def test_rpc_notifier(self):
self._test_rpc_notify('essential.notifier.rpc_notifier')
def test_rpc_notifier2(self):
self._test_rpc_notify('essential.notifier.rpc_notifier2', True)
def test_invalid_priority(self):
self.assertRaises(notifier_api.BadPriorityException,
notifier_api.notify, ctxt, 'publisher_id',
'event_type', 'not a priority', dict(a=3))
def test_rpc_priority_queue(self):
self.CONF.import_opt('notification_topics',
'essential.notifier.rpc_notifier')
self.stubs.Set(self.CONF, 'notification_driver',
['essential.notifier.rpc_notifier'])
self.stubs.Set(self.CONF, 'notification_topics',
['testnotify', ])
self.test_topic = None
def mock_notify(context, topic, msg):
self.test_topic = topic
self.stubs.Set(rpc, 'notify', mock_notify)
notifier_api.notify(ctxt, 'publisher_id',
'event_type', 'DEBUG', dict(a=3))
self.assertEqual(self.test_topic, 'testnotify.debug')
def test_error_notification(self):
self.config(publish_errors=True,
use_stderr=False)
def mock_notify(context, message):
msgs.append(message)
msgs = []
self.stubs.Set(no_op_notifier, 'notify', mock_notify)
LOG = log.getLogger('test_error_notification.common')
log.setup('test_error_notification')
LOG.error('foo')
self.assertEqual(1, len(msgs))
msg = msgs[0]
self.assertEqual(msg['event_type'], 'error_notification')
self.assertEqual(msg['priority'], 'ERROR')
self.assertEqual(msg['payload']['error'], 'foo')
def test_send_notification_by_decorator(self):
self.notify_called = False
def example_api(arg1, arg2):
return arg1 + arg2
example_api = notifier_api.notify_decorator(
'example_api',
example_api)
def mock_notify(cls, *args):
self.notify_called = True
self.stubs.Set(no_op_notifier, 'notify',
mock_notify)
self.assertEqual(3, example_api(1, 2))
self.assertEqual(self.notify_called, True)
def test_decorator_context(self):
"""Verify that the notify decorator can extract the 'context' arg."""
self.notify_called = False
self.context_arg = None
def example_api(arg1, arg2, context):
return arg1 + arg2
def example_api2(arg1, arg2, **kw):
return arg1 + arg2
example_api = notifier_api.notify_decorator(
'example_api',
example_api)
example_api2 = notifier_api.notify_decorator(
'example_api2',
example_api2)
def mock_notify(context, cls, _type, _priority, _payload):
self.notify_called = True
self.context_arg = context
self.stubs.Set(notifier_api, 'notify',
mock_notify)
# Test positional context
self.assertEqual(3, example_api(1, 2, ctxt))
self.assertEqual(self.notify_called, True)
self.assertEqual(self.context_arg, ctxt)
self.notify_called = False
self.context_arg = None
# Test named context
self.assertEqual(3, example_api2(1, 2, context=ctxt2))
self.assertEqual(self.notify_called, True)
self.assertEqual(self.context_arg, ctxt2)
# Test missing context
self.assertEqual(3, example_api2(1, 2, bananas="delicious"))
self.assertEqual(self.notify_called, True)
self.assertIsNone(self.context_arg)
class MultiNotifierTestCase(test.BaseTestCase):
"""Test case for notifications."""
def setUp(self):
super(MultiNotifierTestCase, self).setUp()
self.config = self.useFixture(config.Config()).config
self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
# Mock log to add one to exception_count when log.exception is called
def mock_exception(cls, *args):
self.exception_count += 1
self.exception_count = 0
notifier_log = log.getLogger(
'essential.notifier.api')
self.stubs.Set(notifier_log, "exception", mock_exception)
# Mock no_op notifier to add one to notify_count when called.
def mock_notify(cls, *args):
self.notify_count += 1
self.notify_count = 0
self.stubs.Set(no_op_notifier, 'notify', mock_notify)
# Mock log_notifier to raise RuntimeError when called.
def mock_notify2(cls, *args):
raise RuntimeError("Bad notifier.")
self.stubs.Set(log_notifier, 'notify', mock_notify2)
self.addCleanup(notifier_api._reset_drivers)
def test_send_notifications_successfully(self):
notification_driver = [
'essential.notifier.no_op_notifier'
]
self.config(notification_driver=notification_driver)
notifier_api.notify('contextarg',
'publisher_id',
'event_type',
notifier_api.WARN,
dict(a=3))
self.assertEqual(self.notify_count, 1)
self.assertEqual(self.exception_count, 0)
def test_send_notifications_with_errors(self):
notification_driver = [
'essential.notifier.no_op_notifier',
'essential.notifier.log_notifier'
]
self.config(notification_driver=notification_driver)
notifier_api.notify('contextarg',
'publisher_id',
'event_type',
notifier_api.WARN,
dict(a=3))
self.assertEqual(self.notify_count, 1)
self.assertEqual(self.exception_count, 1)
def test_when_driver_fails_to_import(self):
notification_driver = [
'essential.notifier.no_op_notifier',
'essential.notifier.logo_notifier',
'fdsjgsdfhjkhgsfkj'
]
self.config(notification_driver=notification_driver)
notifier_api.notify('contextarg',
'publisher_id',
'event_type',
notifier_api.WARN,
dict(a=3))
self.assertEqual(self.exception_count, 2)
self.assertEqual(self.notify_count, 1)
def test_publisher_id(self):
self.assertEqual(notifier_api.publisher_id('foobar'),
'foobar.' + socket.gethostname())
self.assertEqual(notifier_api.publisher_id('foobar', 'baz'),
'foobar.baz')
class NotifierProxyTestCase(test.BaseTestCase):
def setUp(self):
super(NotifierProxyTestCase, self).setUp()
self.proxy = proxy.get_notifier(service='service', host='my')
def _call(self, priority):
return mock.call({}, "service.my", "event", priority, "payload")
def test_audit(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.audit({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("INFO"))
def test_debug(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.debug({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("DEBUG"))
def test_info(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.info({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("INFO"))
def test_warn(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.warn({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("WARN"))
def test_warning(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.warning({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("WARN"))
def test_critical(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.critical({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("CRITICAL"))
def test_error(self):
with mock.patch('essential.notifier.api.notify') as notifier:
self.proxy.error({}, "event", "payload")
self.assertEqual(notifier.call_args, self._call("ERROR"))
| apache-2.0 | 6,776,317,366,234,459,000 | 35.447761 | 78 | 0.599017 | false |
molobrakos/home-assistant | homeassistant/components/zwave/cover.py | 8 | 6283 | """Support for Z-Wave covers."""
import logging
from homeassistant.core import callback
from homeassistant.components.cover import (
DOMAIN, SUPPORT_OPEN, SUPPORT_CLOSE, ATTR_POSITION)
from homeassistant.components.cover import CoverDevice
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
ZWaveDeviceEntity, CONF_INVERT_OPENCLOSE_BUTTONS, workaround)
from .const import (
COMMAND_CLASS_SWITCH_MULTILEVEL, COMMAND_CLASS_SWITCH_BINARY,
COMMAND_CLASS_BARRIER_OPERATOR, DATA_NETWORK)
_LOGGER = logging.getLogger(__name__)
SUPPORT_GARAGE = SUPPORT_OPEN | SUPPORT_CLOSE
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Old method of setting up Z-Wave covers."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Cover from Config Entry."""
@callback
def async_add_cover(cover):
"""Add Z-Wave Cover."""
async_add_entities([cover])
async_dispatcher_connect(hass, 'zwave_new_cover', async_add_cover)
def get_device(hass, values, node_config, **kwargs):
"""Create Z-Wave entity device."""
invert_buttons = node_config.get(CONF_INVERT_OPENCLOSE_BUTTONS)
if (values.primary.command_class ==
COMMAND_CLASS_SWITCH_MULTILEVEL
and values.primary.index == 0):
return ZwaveRollershutter(hass, values, invert_buttons)
if values.primary.command_class == COMMAND_CLASS_SWITCH_BINARY:
return ZwaveGarageDoorSwitch(values)
if values.primary.command_class == \
COMMAND_CLASS_BARRIER_OPERATOR:
return ZwaveGarageDoorBarrier(values)
return None
class ZwaveRollershutter(ZWaveDeviceEntity, CoverDevice):
"""Representation of an Z-Wave cover."""
def __init__(self, hass, values, invert_buttons):
"""Initialize the Z-Wave rollershutter."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self._network = hass.data[DATA_NETWORK]
self._open_id = None
self._close_id = None
self._current_position = None
self._invert_buttons = invert_buttons
self._workaround = workaround.get_device_mapping(values.primary)
if self._workaround:
_LOGGER.debug("Using workaround %s", self._workaround)
self.update_properties()
def update_properties(self):
"""Handle data changes for node values."""
# Position value
self._current_position = self.values.primary.data
if self.values.open and self.values.close and \
self._open_id is None and self._close_id is None:
if self._invert_buttons:
self._open_id = self.values.close.value_id
self._close_id = self.values.open.value_id
else:
self._open_id = self.values.open.value_id
self._close_id = self.values.close.value_id
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is None:
return None
if self.current_cover_position > 0:
return False
return True
@property
def current_cover_position(self):
"""Return the current position of Zwave roller shutter."""
if self._workaround == workaround.WORKAROUND_NO_POSITION:
return None
if self._current_position is not None:
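            # Values within 5% of either limit are snapped to fully
            # closed (0) or fully open (100).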
if self._current_position <= 5:
return 0
if self._current_position >= 95:
return 100
return self._current_position
def open_cover(self, **kwargs):
"""Move the roller shutter up."""
self._network.manager.pressButton(self._open_id)
def close_cover(self, **kwargs):
"""Move the roller shutter down."""
self._network.manager.pressButton(self._close_id)
def set_cover_position(self, **kwargs):
"""Move the roller shutter to a specific position."""
self.node.set_dimmer(self.values.primary.value_id,
kwargs.get(ATTR_POSITION))
def stop_cover(self, **kwargs):
"""Stop the roller shutter."""
self._network.manager.releaseButton(self._open_id)
class ZwaveGarageDoorBase(ZWaveDeviceEntity, CoverDevice):
"""Base class for a Zwave garage door device."""
def __init__(self, values):
"""Initialize the zwave garage door."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self._state = None
self.update_properties()
def update_properties(self):
"""Handle data changes for node values."""
self._state = self.values.primary.data
_LOGGER.debug("self._state=%s", self._state)
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'garage'
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_GARAGE
class ZwaveGarageDoorSwitch(ZwaveGarageDoorBase):
"""Representation of a switch based Zwave garage door device."""
@property
def is_closed(self):
"""Return the current position of Zwave garage door."""
return not self._state
def close_cover(self, **kwargs):
"""Close the garage door."""
self.values.primary.data = False
def open_cover(self, **kwargs):
"""Open the garage door."""
self.values.primary.data = True
class ZwaveGarageDoorBarrier(ZwaveGarageDoorBase):
"""Representation of a barrier operator Zwave garage door device."""
@property
def is_opening(self):
"""Return true if cover is in an opening state."""
return self._state == "Opening"
@property
def is_closing(self):
"""Return true if cover is in a closing state."""
return self._state == "Closing"
@property
def is_closed(self):
"""Return the current position of Zwave garage door."""
return self._state == "Closed"
def close_cover(self, **kwargs):
"""Close the garage door."""
self.values.primary.data = "Closed"
def open_cover(self, **kwargs):
"""Open the garage door."""
self.values.primary.data = "Opened"
| apache-2.0 | 4,874,898,263,248,677,000 | 32.77957 | 77 | 0.636161 | false |
ThinkingBridge/platform_external_chromium_org | media/tools/bug_hunter/bug_hunter_unittest.py | 47 | 6473 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit Tests for bug hunter."""
import logging
from optparse import Values
import smtplib
import sys
import unittest
from bug_hunter import BugHunter
from bug_hunter import BugHunterUtils
try:
import atom.data
import gdata.data
import gdata.projecthosting.client
except ImportError:
logging.error('gdata-client needs to be installed. Please install\n'
'and try again (http://code.google.com/p/gdata-python-client/)')
sys.exit(1)
class MockClient(object):
"""A mock class for gdata.projecthosting.client.ProjectHostingClient.
Mocking the very simple method invocations for get_issues() and
get_comments().
"""
def _CreateIssues(self, n_issues):
feed = gdata.projecthosting.data.IssuesFeed()
for i in xrange(n_issues):
feed.entry.append(gdata.projecthosting.data.IssueEntry(
title=atom.data.Title(text='title'),
content=atom.data.Content(text='http://www.content.com'),
id=atom.data.Id(text='/' + str(i)),
status=gdata.projecthosting.data.Status(text='Unconfirmed'),
state=gdata.projecthosting.data.State(text='open'),
label=[gdata.projecthosting.data.Label('label1')],
author=[atom.data.Author(name=atom.data.Name(text='author'))]))
return feed
def get_issues(self, project_name, query):
"""Get issues using mock object without calling the issue tracker API.
    Based on the query argument, this returns dummy issues. The number of
    dummy issues is specified in query.text_query.
Args:
project_name: A string for project name in the issue tracker.
query: A query object for querying the issue tracker.
Returns:
A IssuesFeed object that contains a simple test issue.
"""
n_issues = 1
if query.text_query.isdigit():
n_issues = int(query.text_query)
return self._CreateIssues(n_issues)
def get_comments(self, project_name, issue_id):
"""Get comments using mock object without calling the issue tracker API.
Args:
project_name: A string for project name in the issue tracker.
issue_id: Issue_id string.
Returns:
A CommentsFeed object that contains a simple test comment.
"""
feed = gdata.projecthosting.data.CommentsFeed()
feed.entry = [gdata.projecthosting.data.CommentEntry(
id=atom.data.Id(text='/0'),
content=atom.data.Content(text='http://www.comments.com'),
updated=atom.data.Updated(text='Updated'),
author=[atom.data.Author(name=atom.data.Name(text='cauthor'))])]
return feed
class BugHunterUnitTest(unittest.TestCase):
"""Unit tests for the Bug Hunter class."""
def setUp(self):
self._old_client = gdata.projecthosting.client.ProjectHostingClient
gdata.projecthosting.client.ProjectHostingClient = MockClient
def tearDown(self):
gdata.projecthosting.client.ProjectHostingClient = self._old_client
def _GetDefaultOption(self, set_10_days_ago, query='steps'):
ops = Values()
ops.query = query
if set_10_days_ago:
ops.interval_value = 10
ops.interval_unit = 'days'
else:
ops.interval_value = None
ops.email_entries = ['comments']
ops.project_name = 'chromium'
ops.query_title = 'query title'
ops.max_comments = None
return ops
def _GetIssue(self, n_issues):
issues = []
for i in xrange(n_issues):
issues.append({'issue_id': str(i), 'title': 'title', 'author': 'author',
'status': 'status', 'state': 'state',
'content': 'content', 'comments': [],
'labels': [], 'urls': []})
return issues
def testSetUpEmailSubjectMsg(self):
bh = BugHunter(self._GetDefaultOption(False))
subject, content = bh._SetUpEmailSubjectMsg(self._GetIssue(1))
self.assertEquals(subject,
'BugHunter found 1 query title bug!')
self.assertEquals(content,
('<a href="http://code.google.com/p/chromium/issues/'
'list?can=2&colspec=ID+Pri+Mstone+ReleaseBlock+Area+'
'Feature+Status+Owner+Summary&cells=tiles&sort=-id&'
'q=steps">Used Query</a>: steps<br><br>The number of '
'issues : 1<br><ul><li><a href="http://crbug.com/0">0 '
'title</a> [] </li></ul>'))
def testSetUpEmailSubjectMsgMultipleIssues(self):
bh = BugHunter(self._GetDefaultOption(False))
subject, content = bh._SetUpEmailSubjectMsg(self._GetIssue(2))
self.assertEquals(subject,
'BugHunter found 2 query title bugs!')
def testSetUpEmailSubjectMsgWith10DaysAgoAndAssertSubject(self):
bh = BugHunter(self._GetDefaultOption(True))
subject, _ = bh._SetUpEmailSubjectMsg(self._GetIssue(1))
self.assertEquals(subject,
('BugHunter found 1 query title bug in the past 10 '
'days!'))
def testGetIssuesWithMockClient(self):
bh = BugHunter(self._GetDefaultOption(False,
query=('dummy')))
expected_issues = [{'issue_id': '0', 'title': 'title', 'author': 'author',
'status': 'Unconfirmed', 'state': 'open',
'content': 'http://www.content.com',
'comments': '', 'labels': ['label1'],
'urls': ['http://www.content.com']}]
self.assertEquals(expected_issues, bh.GetIssues())
class MockSmtp(object):
"""A mock class for SMTP."""
def __init__(self, server):
pass
def sendmail(self, sender_email_address, receivers_email_addresses,
msg):
# TODO(imasaki): Do something here.
return True
def quit(self):
pass
class BugHunterUtilsTest(unittest.TestCase):
"""Unit tests for the Bug Hunter utility."""
def testStripHTML(self):
self.assertEquals(BugHunterUtils.StripHTML('<p>X</p>'), 'X')
def testStripHTMLEmpty(self):
self.assertEquals(BugHunterUtils.StripHTML(''), '')
def testSendEmail(self):
smtplib.SMTP = MockSmtp
self.assertEqual(BugHunterUtils.SendEmail('message', 'sender_email_address',
'receivers_email_addresses',
'subject'),
True)
| bsd-3-clause | -3,044,106,016,738,337,300 | 34.565934 | 80 | 0.630156 | false |
wanghongjuan/crosswalk-test-suite | webapi/webapi-iap-xwalk-tests/inst.apk.py | 1996 | 3186 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool handle them.
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
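    """Uninstall the org.xwalk.<name> package for every .apk bundled beside this script."""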
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
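    """Install every .apk found under the script directory onto the target device."""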
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause | 1,980,234,993,638,683,000 | 28.775701 | 92 | 0.54457 | false |
filterfoundry/django-mailer | django_mailer/management/commands/retry_deferred.py | 31 | 1131 | from django.core.management.base import NoArgsCommand
from django_mailer import models
from django_mailer.management.commands import create_handler
from optparse import make_option
import logging
class Command(NoArgsCommand):
help = 'Place deferred messages back in the queue.'
option_list = NoArgsCommand.option_list + (
make_option('-m', '--max-retries', type='int',
help="Don't reset deferred messages with more than this many "
"retries."),
)
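    # Typical invocation (illustrative only; run through manage.py like any
    # other management command):
    #
    #     python manage.py retry_deferred --max-retries=3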
def handle_noargs(self, verbosity, max_retries=None, **options):
# Send logged messages to the console.
logger = logging.getLogger('django_mailer')
handler = create_handler(verbosity)
logger.addHandler(handler)
count = models.QueuedMessage.objects.retry_deferred(
max_retries=max_retries)
logger = logging.getLogger('django_mailer.commands.retry_deferred')
logger.warning("%s deferred message%s placed back in the queue" %
(count, count != 1 and 's' or ''))
logger.removeHandler(handler)
| mit | 6,898,434,033,276,580 | 39.392857 | 76 | 0.642794 | false |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/mpl_toolkits/mplot3d/art3d.py | 8 | 23462 | #!/usr/bin/python
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
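# Typical use, sketched for illustration only (``line`` stands for an existing
# 2D Line2D artist; it is not defined in this module):
#
#     line_2d_to_3d(line, zs=0, zdir='z')   # re-classes the artist as Line3D
#
# The other *_2d_to_3d helpers below do the same for text, patches and
# collections.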
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx==0. and dy==0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
    *zs* The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
self.set_zsort(kwargs.pop('zsort', True))
PolyCollection.__init__(self, verts, *args, **kwargs)
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
if len(segments3d) > 0 :
xs, ys, zs = list(zip(*points))
else :
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei]) \
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec) for
(xs, ys, zs), fc, ec in zip(xyzlist, cface, cedge)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
raise ValueError("whoops")
segments_2d = [s for z, s, fc, ec in z_segments_2d]
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0 :
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else :
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.colorConverter.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.colorConverter.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts(segments_3d)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
except:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.colorConverter.to_rgba(c)
if iscolor(c):
return [c] * num
if len(c) == num:
return c
elif iscolor(c):
return [c] * num
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
if zs.size > 0 :
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
| gpl-2.0 | -7,040,686,215,300,797,000 | 31.228022 | 89 | 0.579618 | false |
flotre/Sick-Beard | sickbeard/providers/xthor.py | 8 | 9852 | # -*- coding: latin-1 -*-
# Author: Raver2046 <[email protected]>
# based on tpi.py
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from bs4 import BeautifulSoup
from sickbeard import classes, show_name_helpers, logger
from sickbeard.common import Quality
import generic
import cookielib
import sickbeard
import urllib
import random
import urllib2
import re
class XTHORProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "XTHOR")
self.supportsBacklog = True
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.url = "https://xthor.bz"
self.login_done = False
self.failed_login_logged = False
self.successful_login_logged = False
def isEnabled(self):
return sickbeard.XTHOR
def getSearchParams(self, searchString, audio_lang, french=None, fullSeason=False):
results = []
if audio_lang == "en" and french==None:
results.append( urllib.urlencode( {
'keywords': searchString ,
} ) + "&cid=43,69&[PARAMSTR]=" + searchString )
elif audio_lang == "fr" or french:
results.append( urllib.urlencode( {
'keywords': searchString
} ) + "&cid=42,41&[PARAMSTR]=" + searchString)
else:
results.append( urllib.urlencode( {
'keywords': searchString
} ) + "&cid=42,43,41,69&[PARAMSTR]=" + searchString)
        # Disabled because we can't know the language
#if fullSeason:
# results.append( urllib.urlencode( {
# 'keywords': searchString
# } ) + "&cid=70&[PARAMSTR]=" + searchString)
return results
def _get_season_search_strings(self, show, season):
showNam = show_name_helpers.allPossibleShowNames(show)
showNames = list(set(showNam))
results = []
for showName in showNames:
results.extend( self.getSearchParams(showName + " saison%d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " season%d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " saison %d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " season %d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " saison%02d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " season%02d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " saison %02d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + " season %02d" % season, show.audio_lang, fullSeason=True))
results.extend( self.getSearchParams(showName + ".S%02d." % season, show.audio_lang, fullSeason=True))
return results
def _get_episode_search_strings(self, ep_obj, french=None):
showNam = show_name_helpers.allPossibleShowNames(ep_obj.show)
showNames = list(set(showNam))
results = []
for showName in showNames:
results.extend( self.getSearchParams( "%s S%02dE%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode), ep_obj.show.audio_lang, french ))
results.extend( self.getSearchParams( "%s %dx%02d" % ( showName, ep_obj.scene_season, ep_obj.scene_episode ), ep_obj.show.audio_lang, french ))
return results
def _get_title_and_url(self, item):
return (item.title, item.url)
def getQuality(self, item):
return item.getQuality()
def _doLogin(self, login, password):
listeUserAgents = [ 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; fr-fr) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13',
'Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) midori',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.107 Safari/535.1',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/312.1 (KHTML, like Gecko) Safari/312',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.8 (KHTML, like Gecko) Chrome/17.0.940.0 Safari/535.8' ]
self.opener.addheaders = [('User-agent', random.choice(listeUserAgents))]
data = urllib.urlencode({'action':'login','loginbox_membername': login, 'loginbox_password' : password, 'loginbox_remember' : 'true'})
r = self.opener.open(self.url + '/ajax/login.php',data)
for index, cookie in enumerate(self.cj):
if (cookie.name == "tsue_member"): self.login_done = True
if not self.login_done and not self.failed_login_logged:
logger.log(u"Unable to login to XTHOR. Please check username and password.", logger.WARNING)
self.failed_login_logged = True
if self.login_done and not self.successful_login_logged:
logger.log(u"Login to XTHOR successful", logger.MESSAGE)
self.successful_login_logged = True
def _doSearch(self, searchString, show=None, season=None, french=None):
if not self.login_done:
self._doLogin( sickbeard.XTHOR_USERNAME, sickbeard.XTHOR_PASSWORD )
results = []
searchUrl = self.url + '?p=torrents&pid=10&search_type=name&' + searchString.replace('!','')
logger.log(u"Search string: " + searchUrl, logger.DEBUG)
r = self.opener.open( searchUrl )
soup = BeautifulSoup( r, "html.parser" )
resultsTable = soup.find("table", { "id" : "torrents_table_classic" })
if resultsTable:
rows = resultsTable.findAll("tr")
for row in rows:
link = row.find("a",href=re.compile("action=details"))
if link:
title = link.text
recherched=searchUrl.split("&[PARAMSTR]=")[1]
recherched=recherched.replace(" ","(.*)")
logger.log(u"XTHOR TITLE : " + title, logger.DEBUG)
logger.log(u"XTHOR CHECK MATCH : " + recherched, logger.DEBUG)
if re.match(recherched,title , re.IGNORECASE):
downloadURL = row.find("a",href=re.compile("action=download"))['href']
logger.log(u"XTHOR DOWNLOAD URL : " + downloadURL, logger.DEBUG)
quality = Quality.nameQuality( title )
if quality==Quality.UNKNOWN and title:
if '720p' not in title.lower() and '1080p' not in title.lower():
quality=Quality.SDTV
if show and french==None:
results.append( XTHORSearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
elif show and french:
results.append( XTHORSearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
else:
results.append( XTHORSearchResult( self.opener, title, downloadURL, quality ) )
return results
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
result = classes.TorrentDataSearchResult(episodes)
result.provider = self
return result
class XTHORSearchResult:
def __init__(self, opener, title, url, quality, audio_langs=None):
self.opener = opener
self.title = title
self.url = url
self.quality = quality
self.audio_langs=audio_langs
def getNZB(self):
logger.log(u"XTHOR GETNZB URL : " + self.url, logger.DEBUG)
return self.opener.open( self.url , 'wb').read()
def getQuality(self):
return self.quality
provider = XTHORProvider()
| gpl-3.0 | 2,272,520,082,890,294,500 | 46.815534 | 169 | 0.574518 | false |
mgedmin/cloneall | ghcloneall.py | 1 | 36053 | #!/usr/bin/python3
"""
Clone all Git repositories for a GitHub user or organisation.
"""
from __future__ import print_function
import argparse
import fnmatch
import os
import subprocess
import sys
import threading
from concurrent import futures
from operator import attrgetter
try:
# Python 2
from ConfigParser import SafeConfigParser as ConfigParser
except ImportError: # pragma: PY3
# Python 3
from configparser import ConfigParser
import requests
import requests_cache
__author__ = 'Marius Gedminas <[email protected]>'
__licence__ = 'MIT'
__url__ = 'https://github.com/mgedmin/ghcloneall'
__version__ = '1.10.1.dev0'
CONFIG_FILE = '.ghcloneallrc'
CONFIG_SECTION = 'ghcloneall'
USER_AGENT = 'ghcloneall/%s (using %s)' % (
__version__, requests.utils.default_user_agent(),
)
class Error(Exception):
"""An error that is not a bug in this script."""
def get_json_and_links(url, session=None):
"""Perform HTTP GET for a URL, return deserialized JSON and headers.
Returns a tuple (json_data, links) where links is something dict-like.
"""
session = requests.Session() if session is None else session
r = session.get(url, headers={'user-agent': USER_AGENT})
    # When we get a JSON error response from GitHub, we want to show that
# message to the user instead of a traceback. I expect it'll be something
# like "rate limit exceeded, try again in N minutes".
if 400 <= r.status_code < 500:
raise Error("Failed to fetch {}:\n{}".format(url, r.json()['message']))
# But if GitHub is down and returns a 502 instead of a 200, let's not try
# to parse the response as JSON.
r.raise_for_status()
return r.json(), r.links
def get_github_list(url, batch_size=100, progress_callback=None, session=None):
"""Perform (a series of) HTTP GETs for a URL, return deserialized JSON.
Format of the JSON is documented at
http://developer.github.com/v3/repos/#list-organization-repositories
Supports batching (which GitHub indicates by the presence of a Link header,
e.g. ::
Link: <https://api.github.com/resource?page=2>; rel="next",
<https://api.github.com/resource?page=5>; rel="last"
"""
session = requests.Session() if session is None else session
# API documented at http://developer.github.com/v3/#pagination
res, links = get_json_and_links('{}{}per_page={}'.format(
url, '&' if '?' in url else '?', batch_size), session)
while 'next' in links:
if progress_callback:
progress_callback(len(res))
more, links = get_json_and_links(links['next']['url'], session)
res += more
return res
def synchronized(method):
def wrapper(self, *args, **kw):
with self.lock:
return method(self, *args, **kw)
return wrapper
class Progress(object):
"""A progress bar.
There are two parts of progress output:
- a scrolling list of items
- a progress bar (or status message) at the bottom
These are controlled by the following API methods:
- status(msg) replaces the progress bar with a status message
- clear() clears the progress bar/status message
    - set_limit(n) defines how many items there will be in total
- item(text) shows an item and updates the progress bar
- update(extra_text) updates the last item (and highlights it in a
different color)
- finish(msg) clear the progress bar/status message and print a summary
"""
progress_bar_format = '[{bar}] {cur}/{total}'
bar_width = 20
full_char = '#'
empty_char = '.'
# XXX should use curses.tigetstr() to get these
# and curses.tparm() to specify arguments
t_cursor_up = '\033[%dA' # curses.tigetstr('cuu').replace('%p1', '')
t_cursor_down = '\033[%dB' # curses.tigetstr('cud').replace('%p1', '')
t_insert_lines = '\033[%dL' # curses.tigetstr('il').replace('%p1', '')
t_delete_lines = '\033[%dM' # curses.tigetstr('dl').replace('%p1', '')
t_reset = '\033[m' # curses.tigetstr('sgr0'), maybe overkill
t_red = '\033[31m' # curses.tparm(curses.tigetstr('setaf'), 1)
t_green = '\033[32m' # curses.tparm(curses.tigetstr('setaf'), 2)
t_brown = '\033[33m' # curses.tparm(curses.tigetstr('setaf'), 3)
def __init__(self, stream=None):
self.stream = sys.stdout if stream is None else stream
self.last_status = '' # so we know how many characters to erase
self.cur = self.total = 0
self.items = []
self.lock = threading.RLock()
self.finished = False
@synchronized
def status(self, message):
"""Replace the status message."""
if self.finished:
return
self.clear()
if message:
self.stream.write('\r')
self.stream.write(message)
self.stream.write('\r')
self.stream.flush()
self.last_status = message
@synchronized
def clear(self):
"""Clear the status message."""
if self.finished:
return
if self.last_status:
self.stream.write(
'\r{}\r'.format(' ' * len(self.last_status.rstrip())))
self.stream.flush()
self.last_status = ''
@synchronized
def finish(self, msg=''):
"""Clear the status message and print a summary.
Differs from status(msg) in that it leaves the cursor on a new line
and cannot be cleared.
"""
self.clear()
self.finished = True
if msg:
print(msg, file=self.stream)
def progress(self):
self.status(self.format_progress_bar(self.cur, self.total))
def format_progress_bar(self, cur, total):
return self.progress_bar_format.format(
cur=cur, total=total, bar=self.bar(cur, total))
def scale(self, range, cur, total):
return range * cur // max(total, 1)
def bar(self, cur, total):
n = min(self.scale(self.bar_width, cur, total), self.bar_width)
return (self.full_char * n).ljust(self.bar_width, self.empty_char)
def set_limit(self, total):
"""Specify the expected total number of items.
E.g. if you set_limit(10), this means you expect to call item() ten
times.
"""
self.total = total
self.progress()
@synchronized
def item(self, msg=''):
"""Show an item and update the progress bar."""
item = self.Item(self, msg, len(self.items))
self.items.append(item)
if msg:
self.clear()
self.draw_item(item)
self.cur += 1
self.progress()
return item
@synchronized
def draw_item(self, item, prefix='', suffix='\n', flush=True):
if self.finished:
return
if item.hidden:
return
self.stream.write(''.join([
prefix,
item.color,
item.msg,
item.reset,
suffix,
]))
if flush:
self.stream.flush()
@synchronized
def update_item(self, item):
n = sum(i.height for i in self.items[item.idx:])
        # We could use t_cursor_down % n to come back, but then we'd
# also have to emit a \r to return to the first column.
# NB: when the user starts typing random shit or hits ^C then
# characters we didn't expect get emitted on screen, so maybe I should
# tweak terminal modes and disable local echo? Or at least print
# spurious \rs every time?
self.draw_item(item, '\r' + self.t_cursor_up % n if n else '',
'\r' + self.t_cursor_down % n if n else '')
@synchronized
def delete_item(self, item):
if self.finished:
return
# NB: have to update item inside the critical section to avoid display
# corruption!
if item.hidden:
return
item.hidden = True
n = sum(i.height for i in self.items[item.idx:])
self.stream.write(''.join([
self.t_cursor_up % (n + 1),
self.t_delete_lines % 1,
self.t_cursor_down % n if n else '',
]))
self.stream.flush()
@synchronized
def extra_info(self, item, lines):
if self.finished:
return
assert not item.hidden
# NB: have to update item inside the critical section to avoid display
# corruption!
item.extra_info_lines += lines
n = sum(i.height for i in self.items[item.idx + 1:])
if n:
self.stream.write(self.t_cursor_up % n)
self.stream.write(self.t_insert_lines % len(lines))
for indent, color, line, reset in lines:
self.stream.write(''.join([indent, color, line, reset, '\n']))
# t_insert_lines may push the lines off the bottom of the screen,
# so we need to redraw everything below the item we've updated
# to be sure it's not gone.
for i in self.items[item.idx + 1:]:
self.draw_item(i, flush=False)
for indent, color, line, reset in i.extra_info_lines:
self.stream.write(''.join([indent, color, line, reset, '\n']))
self.progress()
class Item(object):
def __init__(self, progress, msg, idx):
self.progress = progress
self.msg = msg
self.idx = idx
self.extra_info_lines = []
self.color = self.progress.t_brown
self.reset = self.progress.t_reset
self.updated = False
self.failed = False
self.hidden = False
@property
def height(self):
# NB: any updates to attributes that affect height have to be
# synchronized!
return (0 if self.hidden else 1) + len(self.extra_info_lines)
def update(self, msg, failed=False):
"""Update the last shown item and highlight it."""
self.updated = True
self.reset = self.progress.t_reset
if failed:
self.failed = True
self.color = self.progress.t_red
elif not self.failed:
self.color = self.progress.t_green
self.msg += msg
self.progress.update_item(self)
def hide(self):
# NB: hidden items retain their extra_info, which can be
# confusing, so let's make sure we're not hiding any items with
# extra_info.
assert not self.extra_info_lines
self.progress.delete_item(self)
def finished(self, hide=False):
"""Mark the item as finished."""
if not self.updated and not self.failed:
self.color = ''
self.reset = ''
if hide:
self.hide()
else:
self.progress.update_item(self)
def extra_info(self, msg, color='', reset='', indent=' '):
"""Print some extra information."""
lines = [(indent, color, line, reset) for line in msg.splitlines()]
if not lines:
return
self.progress.extra_info(self, lines)
def error_info(self, msg):
"""Print some extra information about an error."""
self.extra_info(msg, color=self.progress.t_red,
reset=self.progress.t_reset)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.clear()
if exc_type is KeyboardInterrupt:
self.finish('Interrupted')
class Repo(object):
def __init__(self, name, clone_url, alt_urls=()):
self.name = name
self.clone_url = clone_url
self.urls = {clone_url}
self.urls.update(alt_urls)
def __repr__(self):
return 'Repo({!r}, {!r}, {{{}}})'.format(
self.name, self.clone_url, ', '.join(map(repr, sorted(self.urls))))
def __eq__(self, other):
if not isinstance(other, Repo):
return False
return (
self.name, self.clone_url, self.urls,
) == (
other.name, other.clone_url, other.urls,
)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_repo(cls, repo):
# use repo['git_url'] for anonymous checkouts, but they're slower
# (at least as long as you use SSH connection multiplexing)
clone_url = repo['ssh_url']
return cls(repo['name'], clone_url, (repo['clone_url'],))
@classmethod
def from_gist(cls, gist):
return cls(gist['id'], gist['git_pull_url'], (gist['git_push_url'],))
class RepoWrangler(object):
def __init__(self, dry_run=False, verbose=0, progress=None, quiet=False,
token=None):
self.n_repos = 0
self.n_updated = 0
self.n_new = 0
self.n_dirty = 0
self.dry_run = dry_run
self.verbose = verbose or 0
self.quiet = quiet
self.progress = progress if progress else Progress()
self.lock = threading.Lock()
self.session = requests.Session()
if token:
self.session.auth = ('', token)
def get_github_list(self, list_url, message):
self.progress.status(message)
def progress_callback(n):
self.progress.status("{} ({})".format(message, n))
return get_github_list(list_url, progress_callback=progress_callback,
session=self.session)
def list_gists(self, user, pattern=None):
list_url = 'https://api.github.com/users/{}/gists'.format(user)
message = "Fetching list of {}'s gists from GitHub...".format(user)
gists = self.get_github_list(list_url, message)
if pattern:
# TBH this is of questionable utility, but maybe you want to clone
# a single gist and so you can pass --pattern=id-of-that-gist
gists = (g for g in gists if fnmatch.fnmatch(g['id'], pattern))
# other possibilities for filtering:
# - exclude private gists (if g['public'])
return sorted(map(Repo.from_gist, gists), key=attrgetter('name'))
def list_repos(self, user=None, organization=None, pattern=None,
include_archived=False, include_forks=False,
include_private=True, include_disabled=True):
if organization and not user:
owner = organization
list_url = 'https://api.github.com/orgs/{}/repos'.format(owner)
elif user and not organization:
owner = user
list_url = 'https://api.github.com/users/{}/repos'.format(owner)
else:
raise ValueError('specify either user or organization, not both')
message = "Fetching list of {}'s repositories from GitHub...".format(
owner)
# User repositories default to sort=full_name, org repositories default
# to sort=created. In theory we don't care because we will sort the
# list ourselves, but in the future I may want to start cloning in
# parallel with the paginated fetching. This requires the sorting to
# happen before pagination, i.e. on the server side, as I want to
# process the repositories alphabetically (both for aesthetic reasons,
# and in order for --start-from to be useful).
list_url += '?sort=full_name'
repos = self.get_github_list(list_url, message)
if not include_archived:
repos = (r for r in repos if not r['archived'])
if not include_forks:
repos = (r for r in repos if not r['fork'])
if not include_private:
repos = (r for r in repos if not r['private'])
if not include_disabled:
repos = (r for r in repos if not r['disabled'])
# other possibilities for filtering:
# - exclude template repos (if not r['is_template']), once that feature
# is out of beta
if pattern:
repos = (r for r in repos if fnmatch.fnmatch(r['name'], pattern))
return sorted(map(Repo.from_repo, repos), key=attrgetter('name'))
def repo_task(self, repo):
item = self.progress.item("+ {name}".format(name=repo.name))
task = RepoTask(repo, item, self, self.task_finished)
return task
@synchronized
def task_finished(self, task):
self.n_repos += 1
self.n_new += task.new
self.n_updated += task.updated
self.n_dirty += task.dirty
class RepoTask(object):
def __init__(self, repo, progress_item, options, finished_callback):
self.repo = repo
self.progress_item = progress_item
self.options = options
self.finished_callback = finished_callback
self.updated = False
self.new = False
self.dirty = False
def repo_dir(self, repo):
return repo.name
def repo_url(self, repo):
return repo.clone_url
def decode(self, output):
return output.decode('UTF-8', 'replace')
def branch_name(self, head):
if head.startswith('refs/'):
head = head[len('refs/'):]
if head.startswith('heads/'):
head = head[len('heads/'):]
return head
def pretty_command(self, args):
if self.options.verbose:
return ' '.join(args)
else:
return ' '.join(args[:2]) # 'git diff' etc.
def call(self, args, **kwargs):
"""Call a subprocess and return its exit code.
The subprocess is expected to produce no output. If any output is
seen, it'll be displayed as an error.
"""
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, **kwargs)
output, _ = p.communicate()
retcode = p.wait()
if output:
self.progress_item.error_info(self.decode(output))
self.progress_item.error_info(
'{command} exited with {rc}'.format(
command=self.pretty_command(args), rc=retcode))
return retcode
def check_call(self, args, **kwargs):
"""Call a subprocess.
The subprocess is expected to produce no output. If any output is
seen, it'll be displayed as an error.
The subprocess is expected to return exit code 0. If it returns
non-zero, that'll be displayed as an error.
"""
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, **kwargs)
output, _ = p.communicate()
retcode = p.wait()
if retcode != 0:
self.progress_item.update(' (failed)', failed=True)
if output or retcode != 0:
self.progress_item.error_info(self.decode(output))
self.progress_item.error_info(
'{command} exited with {rc}'.format(
command=self.pretty_command(args), rc=retcode))
def check_output(self, args, **kwargs):
"""Call a subprocess and return its standard output code.
The subprocess is expected to produce no output on stderr. If any
output is seen, it'll be displayed as an error.
The subprocess is expected to return exit code 0. If it returns
non-zero, that'll be displayed as an error.
"""
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
stdout, stderr = p.communicate()
retcode = p.wait()
if stderr or retcode != 0:
self.progress_item.error_info(self.decode(stderr))
self.progress_item.error_info(
'{command} exited with {rc}'.format(
command=self.pretty_command(args), rc=retcode))
return self.decode(stdout)
def run(self):
try:
dir = self.repo_dir(self.repo)
if os.path.exists(dir):
self.update(self.repo, dir)
self.verify(self.repo, dir)
else:
self.clone(self.repo, dir)
except Exception as e:
self.progress_item.error_info(
'{}: {}'.format(e.__class__.__name__, e))
finally:
if (self.options.quiet
and not self.progress_item.updated
and not self.progress_item.failed
and not self.progress_item.extra_info_lines):
self.progress_item.hide()
self.progress_item.finished()
if self.finished_callback:
self.finished_callback(self)
def aborted(self):
self.progress_item.update(' (aborted)', failed=True)
self.progress_item.finished()
if self.finished_callback:
self.finished_callback(self)
def clone(self, repo, dir):
self.progress_item.update(' (new)')
if not self.options.dry_run:
url = self.repo_url(repo)
self.check_call(['git', 'clone', '-q', url])
self.new = True
def update(self, repo, dir):
if not self.options.dry_run:
old_sha = self.get_current_commit(dir)
self.check_call(['git', 'pull', '-q', '--ff-only'], cwd=dir)
new_sha = self.get_current_commit(dir)
if old_sha != new_sha:
self.progress_item.update(' (updated)')
self.updated = True
def verify(self, repo, dir):
if self.has_local_changes(dir):
self.progress_item.update(' (local changes)')
self.dirty = True
if self.has_staged_changes(dir):
self.progress_item.update(' (staged changes)')
self.dirty = True
if self.has_local_commits(dir):
self.progress_item.update(' (local commits)')
self.dirty = True
branch = self.get_current_branch(dir)
if branch != 'master':
self.progress_item.update(' (not on master)')
if self.options.verbose >= 2:
self.progress_item.extra_info('branch: {}'.format(branch))
self.dirty = True
if self.options.verbose:
remote_url = self.get_remote_url(dir)
if not remote_url.endswith('.git'):
remote_url += '.git'
if remote_url not in repo.urls:
self.progress_item.update(' (wrong remote url)')
if self.options.verbose >= 2:
self.progress_item.extra_info(
'remote: {}'.format(remote_url))
self.progress_item.extra_info(
'expected: {}'.format(repo.clone_url))
for url in repo.urls:
if url != repo.clone_url:
self.progress_item.extra_info(
'alternatively: {}'.format(url))
self.dirty = True
if self.options.verbose:
unknown_files = self.get_unknown_files(dir)
if unknown_files:
self.progress_item.update(' (unknown files)')
if self.options.verbose >= 2:
if self.options.verbose < 3 and len(unknown_files) > 10:
unknown_files[10:] = [
'(and %d more)' % (len(unknown_files) - 10),
]
self.progress_item.extra_info('\n'.join(unknown_files))
self.dirty = True
def has_local_changes(self, dir):
# command borrowed from /usr/lib/git-core/git-sh-prompt
return self.call(
['git', 'diff', '--no-ext-diff', '--quiet', '--exit-code'],
cwd=dir) != 0
def has_staged_changes(self, dir):
# command borrowed from /usr/lib/git-core/git-sh-prompt
return self.call(
['git', 'diff-index', '--cached', '--quiet', 'HEAD', '--'],
cwd=dir) != 0
def has_local_commits(self, dir):
return self.check_output(['git', 'rev-list', '@{u}..'], cwd=dir) != ''
def get_current_commit(self, dir):
return self.check_output(
['git', 'describe', '--always', '--dirty'], cwd=dir)
def get_current_head(self, dir):
return self.check_output(
['git', 'symbolic-ref', 'HEAD'], cwd=dir).strip()
def get_current_branch(self, dir):
return self.branch_name(self.get_current_head(dir))
def get_remote_url(self, dir):
return self.check_output(
['git', 'ls-remote', '--get-url'], cwd=dir).strip()
def get_unknown_files(self, dir):
# command borrowed from /usr/lib/git-core/git-sh-prompt
return self.check_output(
['git', 'ls-files', '--others', '--exclude-standard', '--', ':/*'],
cwd=dir).splitlines()
class SequentialJobQueue(object):
def add(self, task):
task.run()
def finish(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.finish()
class ConcurrentJobQueue(object):
def __init__(self, concurrency=2):
self.jobs = set()
self.concurrency = concurrency
self.pool = futures.ThreadPoolExecutor(
max_workers=concurrency)
def add(self, task):
try:
while len(self.jobs) >= self.concurrency:
done, not_done = futures.wait(
self.jobs, return_when=futures.FIRST_COMPLETED)
self.jobs.difference_update(done)
future = self.pool.submit(task.run)
self.jobs.add(future)
except KeyboardInterrupt:
task.aborted()
raise
def finish(self):
self.pool.shutdown()
self.jobs.clear()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.finish()
def spawn_ssh_control_master():
# If the user has 'ControlMaster auto' in their ~/.ssh/config, one of the
# git clone/pull commands we initiate will start a control master process
# that will never exit, with its stdout/stderr pointing to our pipe, and
# our p.communicate() will block forever. So let's make sure there's a
# control master process running before we start git clone/pull processes.
# https://github.com/mgedmin/ghcloneall/issues/1
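    # The kind of ~/.ssh/config stanza this works around looks roughly like
    # this (illustrative, not something this script requires):
    #
    #     Host github.com
    #         ControlMaster auto
    #         ControlPath ~/.ssh/control-%r@%h:%p
    #         ControlPersist 600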
subprocess.Popen(['ssh', '-q', '-fN', '-M', '-o', 'ControlPersist=600',
'[email protected]'])
def read_config_file(filename):
config = ConfigParser()
config.read([filename])
return config
def write_config_file(filename, config):
with open(filename, 'w') as fp:
config.write(fp)
def _main():
parser = argparse.ArgumentParser(
description="Clone/update all user/org repositories from GitHub.")
parser.add_argument(
'--version', action='version',
version="%(prog)s version " + __version__)
parser.add_argument(
'-c', '--concurrency', type=int, default=4,
help="set concurrency level (default: %(default)s)")
parser.add_argument(
'-n', '--dry-run', action='store_true',
help="don't pull/clone, just print what would be done")
parser.add_argument(
'-q', '--quiet', action='store_true',
help="terser output")
parser.add_argument(
'-v', '--verbose', action='count',
help="perform additional checks")
parser.add_argument(
'--start-from', metavar='REPO',
help='skip all repositories that come before REPO alphabetically')
parser.add_argument(
'--organization',
help='specify the GitHub organization')
parser.add_argument(
'--user',
help='specify the GitHub user')
parser.add_argument(
'--github-token',
help='specify the GitHub token')
parser.add_argument(
'--gists', action='store_true', default=None,
help="clone user's gists")
parser.add_argument(
'--repositories', action='store_false', dest='gists',
help="clone user's or organisation's repositories (default)")
parser.add_argument(
'--pattern',
help='specify repository name glob pattern to filter')
parser.add_argument(
'--include-forks', action='store_true', default=None,
help='include repositories forked from other users/orgs')
parser.add_argument(
'--exclude-forks', action='store_false', dest='include_forks',
help='exclude repositories forked from other users/orgs (default)')
parser.add_argument(
'--include-archived', action='store_true', default=None,
help='include archived repositories')
parser.add_argument(
'--exclude-archived', action='store_false', dest='include_archived',
help='exclude archived repositories (default)')
parser.add_argument(
'--include-private', action='store_true', default=None,
help='include private repositories (default)')
parser.add_argument(
'--exclude-private', action='store_false', dest='include_private',
help='exclude private repositories')
    # Apparently disabled repositories are private repositories where
# you didn't pay the bill, so you can't access them any more
# (until you pay the bill). I'm going to include them by
# default to let the user notice they have a problem (assuming
# git clone/git pull will fail on a disabled repository).
parser.add_argument(
'--include-disabled', action='store_true', default=None,
help='include disabled repositories (default)')
parser.add_argument(
'--exclude-disabled', action='store_false', dest='include_disabled',
help='exclude disabled repositories')
parser.add_argument(
'--init', action='store_true',
help='create a {} from command-line arguments'.format(CONFIG_FILE))
parser.add_argument(
'--http-cache', default='.httpcache', metavar='DBNAME',
# .sqlite will be appended automatically
help='cache HTTP requests on disk in an sqlite database for 5 minutes'
' (default: .httpcache)')
parser.add_argument(
'--no-http-cache', action='store_false', dest='http_cache',
help='disable HTTP disk caching')
args = parser.parse_args()
config = read_config_file(CONFIG_FILE)
if not args.user and not args.organization:
if config.has_option(CONFIG_SECTION, 'github_user'):
args.user = config.get(CONFIG_SECTION, 'github_user')
if config.has_option(CONFIG_SECTION, 'github_org'):
args.organization = config.get(CONFIG_SECTION, 'github_org')
if not args.github_token:
if config.has_option(CONFIG_SECTION, 'github_token'):
args.github_token = config.get(CONFIG_SECTION, 'github_token')
if not args.pattern:
if config.has_option(CONFIG_SECTION, 'pattern'):
args.pattern = config.get(CONFIG_SECTION, 'pattern')
if args.gists is None:
if config.has_option(CONFIG_SECTION, 'gists'):
args.gists = config.getboolean(CONFIG_SECTION, 'gists')
if args.include_forks is None:
if config.has_option(CONFIG_SECTION, 'include_forks'):
args.include_forks = config.getboolean(CONFIG_SECTION,
'include_forks')
if args.include_archived is None:
if config.has_option(CONFIG_SECTION, 'include_archived'):
args.include_archived = config.getboolean(CONFIG_SECTION,
'include_archived')
if args.include_private is None:
if config.has_option(CONFIG_SECTION, 'include_private'):
args.include_private = config.getboolean(CONFIG_SECTION,
'include_private')
if args.include_disabled is None:
if config.has_option(CONFIG_SECTION, 'include_disabled'):
args.include_disabled = config.getboolean(CONFIG_SECTION,
'include_disabled')
if args.user and args.organization:
parser.error(
"Please specify either --user or --organization, but not both.")
if not args.user and not args.organization:
parser.error(
"Please specify either --user or --organization")
if args.gists and not args.user:
parser.error(
"Please specify --user, not --organization, when using --gists")
if args.init:
config.remove_section(CONFIG_SECTION)
config.add_section(CONFIG_SECTION)
if args.user:
config.set(CONFIG_SECTION, 'github_user', args.user)
if args.organization:
config.set(CONFIG_SECTION, 'github_org', args.organization)
if args.github_token:
config.set(CONFIG_SECTION, 'github_token', args.github_token)
if args.pattern:
config.set(CONFIG_SECTION, 'pattern', args.pattern)
if args.gists is not None:
config.set(CONFIG_SECTION, 'gists', str(args.gists))
if args.include_forks is not None:
config.set(CONFIG_SECTION, 'include_forks',
str(args.include_forks))
if args.include_archived is not None:
config.set(CONFIG_SECTION, 'include_archived',
str(args.include_archived))
if args.include_private is not None:
config.set(CONFIG_SECTION, 'include_private',
str(args.include_private))
if args.include_disabled is not None:
config.set(CONFIG_SECTION, 'include_disabled',
str(args.include_disabled))
if not args.dry_run:
write_config_file(CONFIG_FILE, config)
print("Wrote {}".format(CONFIG_FILE))
else:
print(
"Did not write {} because --dry-run was specified".format(
CONFIG_FILE))
return
if args.include_private is None:
args.include_private = True
if args.include_disabled is None:
args.include_disabled = True
if args.http_cache:
requests_cache.install_cache(args.http_cache,
backend='sqlite',
expire_after=300)
spawn_ssh_control_master()
with Progress() as progress:
wrangler = RepoWrangler(dry_run=args.dry_run, verbose=args.verbose,
progress=progress, quiet=args.quiet,
token=args.github_token)
if args.gists:
repos = wrangler.list_gists(
user=args.user,
pattern=args.pattern,
)
else:
repos = wrangler.list_repos(
organization=args.organization,
user=args.user,
pattern=args.pattern,
include_forks=args.include_forks,
include_archived=args.include_archived,
include_private=args.include_private,
include_disabled=args.include_disabled,
)
progress.set_limit(len(repos))
if args.concurrency < 2:
queue = SequentialJobQueue()
else:
queue = ConcurrentJobQueue(args.concurrency)
with queue:
for repo in repos:
if args.start_from and repo.name < args.start_from:
progress.item()
continue
task = wrangler.repo_task(repo)
queue.add(task)
progress.finish(
"{0.n_repos} repositories: {0.n_updated} updated, {0.n_new} new,"
" {0.n_dirty} dirty.".format(wrangler))
def main():
try:
_main()
except Error as e:
sys.exit(e)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| mit | -8,850,432,707,497,072,000 | 36.0154 | 79 | 0.574599 | false |