# === amyvmiwei/kbengine :: kbe/src/lib/python/Lib/gettext.py (lgpl-3.0) ===
"""Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
# This module represents the integration of work, contributions, feedback, and
# suggestions from the following people:
#
# Martin von Loewis, who wrote the initial implementation of the underlying
# C-based libintlmodule (later renamed _gettext), along with a skeletal
# gettext.py implementation.
#
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
# which also included a pure-Python implementation to read .mo files if
# intlmodule wasn't available.
#
# James Henstridge, who also wrote a gettext.py module, which has some
# interesting, but currently unsupported experimental features: the notion of
# a Catalog class and instances, and the ability to add to a catalog file via
# a Python API.
#
# Barry Warsaw integrated these modules, wrote the .install() API and code,
# and conformed all C and Python code to Python's coding standards.
#
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
# module.
#
# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
#
# TODO:
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
# memory, but that's probably bad for large translated programs. Instead,
# the lexical sort of original strings in GNU .mo files should be exploited
# to do binary searches and lazy initializations. Or you might want to use
# the undocumented double-hash algorithm for .mo files with hash tables, but
# you'll need to study the GNU gettext code to do this.
#
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
# find this format documented anywhere.
import locale, copy, io, os, re, struct, sys
from errno import ENOENT
__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
'dgettext', 'dngettext', 'gettext', 'ngettext',
]
_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
def c2py(plural):
"""Gets a C expression as used in PO files for plural forms and returns a
Python lambda function that implements an equivalent expression.
"""
# Security check, allow only the "n" identifier
import token, tokenize
tokens = tokenize.generate_tokens(io.StringIO(plural).readline)
try:
danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n']
except tokenize.TokenError:
raise ValueError('plural forms expression error, maybe unbalanced parenthesis')
else:
if danger:
raise ValueError('plural forms expression could be dangerous')
# Replace some C operators by their Python equivalents
plural = plural.replace('&&', ' and ')
plural = plural.replace('||', ' or ')
expr = re.compile(r'\!([^=])')
plural = expr.sub(' not \\1', plural)
# Regular expression and replacement function used to transform
# "a?b:c" to "b if a else c".
expr = re.compile(r'(.*?)\?(.*?):(.*)')
def repl(x):
return "(%s if %s else %s)" % (x.group(2), x.group(1),
expr.sub(repl, x.group(3)))
# Code to transform the plural expression, taking care of parentheses
stack = ['']
for c in plural:
if c == '(':
stack.append('')
elif c == ')':
if len(stack) == 1:
# Actually, we never reach this code, because unbalanced
# parentheses get caught in the security check at the
# beginning.
raise ValueError('unbalanced parenthesis in plural form')
s = expr.sub(repl, stack.pop())
stack[-1] += '(%s)' % s
else:
stack[-1] += c
plural = expr.sub(repl, stack.pop())
return eval('lambda n: int(%s)' % plural)
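# A minimal sketch of c2py in action (illustrative values only):
#
#     f = c2py('n != 1')      # Germanic plural rule
#     f(1)                    # -> 0 (singular form)
#     f(5)                    # -> 1 (plural form)
#
#     g = c2py('n > 1')       # French-style rule
#     g(0)                    # -> 0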
def _expand_lang(loc):
loc = locale.normalize(loc)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = loc.find('@')
if pos >= 0:
modifier = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = loc.find('.')
if pos >= 0:
codeset = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = loc.find('_')
if pos >= 0:
territory = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = loc
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
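# Example expansion (illustrative; assumes locale.normalize() returns the
# input unchanged for an already-normalized string):
#
#     _expand_lang('de_DE.ISO8859-1')
#     # -> ['de_DE.ISO8859-1', 'de_DE', 'de.ISO8859-1', 'de']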
class NullTranslations:
def __init__(self, fp=None):
self._info = {}
self._charset = None
self._output_charset = None
self._fallback = None
if fp is not None:
self._parse(fp)
def _parse(self, fp):
pass
def add_fallback(self, fallback):
if self._fallback:
self._fallback.add_fallback(fallback)
else:
self._fallback = fallback
def gettext(self, message):
if self._fallback:
return self._fallback.gettext(message)
return message
def lgettext(self, message):
if self._fallback:
return self._fallback.lgettext(message)
return message
def ngettext(self, msgid1, msgid2, n):
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def lngettext(self, msgid1, msgid2, n):
if self._fallback:
return self._fallback.lngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def info(self):
return self._info
def charset(self):
return self._charset
def output_charset(self):
return self._output_charset
def set_output_charset(self, charset):
self._output_charset = charset
def install(self, names=None):
import builtins
builtins.__dict__['_'] = self.gettext
if hasattr(names, "__contains__"):
if "gettext" in names:
builtins.__dict__['gettext'] = builtins.__dict__['_']
if "ngettext" in names:
builtins.__dict__['ngettext'] = self.ngettext
if "lgettext" in names:
builtins.__dict__['lgettext'] = self.lgettext
if "lngettext" in names:
builtins.__dict__['lngettext'] = self.lngettext
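# Usage sketch (illustrative): installing the null catalog binds _() to the
# identity function until a real catalog is installed.
#
#     t = NullTranslations()
#     t.install(names=['ngettext'])
#     _('hello')               # -> 'hello'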
class GNUTranslations(NullTranslations):
# Magic number of .mo files
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
def _parse(self, fp):
"""Override this method to support alternative .mo formats."""
unpack = struct.unpack
filename = getattr(fp, 'name', '')
# Parse the .mo file header, which consists of 5 little endian 32
# bit words.
self._catalog = catalog = {}
self.plural = lambda n: int(n != 1) # germanic plural by default
buf = fp.read()
buflen = len(buf)
# Are we big endian or little endian?
magic = unpack('<I', buf[:4])[0]
if magic == self.LE_MAGIC:
version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == self.BE_MAGIC:
version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise OSError(0, 'Bad magic number', filename)
# Now put all messages from the .mo file buffer into the catalog
# dictionary.
for i in range(0, msgcount):
mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx+8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise OSError(0, 'File is corrupt', filename)
# See if we're looking at GNU .mo conventions for metadata
if mlen == 0:
# Catalog description
lastk = k = None
for b_item in tmsg.split('\n'.encode("ascii")):
item = b_item.decode().strip()
if not item:
continue
if ':' in item:
k, v = item.split(':', 1)
k = k.strip().lower()
v = v.strip()
self._info[k] = v
lastk = k
elif lastk:
self._info[lastk] += '\n' + item
if k == 'content-type':
self._charset = v.split('charset=')[1]
elif k == 'plural-forms':
v = v.split(';')
plural = v[1].split('plural=')[1]
self.plural = c2py(plural)
# Note: we unconditionally convert both msgids and msgstrs to
# Unicode using the character encoding specified in the charset
# parameter of the Content-Type header. The gettext documentation
# strongly encourages msgids to be us-ascii, but some applications
# require alternative encodings (e.g. Zope's ZCML and ZPT). For
# traditional gettext applications, the msgid conversion will
# cause no problems since us-ascii should always be a subset of
# the charset encoding. We may want to fall back to 8-bit msgids
# if the Unicode conversion fails.
charset = self._charset or 'ascii'
if b'\x00' in msg:
# Plural forms
msgid1, msgid2 = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
msgid1 = str(msgid1, charset)
for i, x in enumerate(tmsg):
catalog[(msgid1, i)] = str(x, charset)
else:
catalog[str(msg, charset)] = str(tmsg, charset)
# advance to next entry in the seek tables
masteridx += 8
transidx += 8
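    # Header sketch (illustrative): an empty little-endian catalog is just
    # the five 32-bit header words, so lookups fall through to the msgid.
    #
    #     import io, struct
    #     buf = struct.pack('<5I', 0x950412de, 0, 0, 28, 28)
    #     t = GNUTranslations(io.BytesIO(buf))
    #     t.gettext('missing')   # -> 'missing' (empty catalog)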
def lgettext(self, message):
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.lgettext(message)
return message
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
def lngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
except KeyError:
if self._fallback:
return self._fallback.lngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def gettext(self, message):
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.gettext(message)
return message
return tmsg
def ngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
except KeyError:
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
tmsg = msgid1
else:
tmsg = msgid2
return tmsg
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None, all=False):
# Get some reasonable defaults for arguments that were not supplied
if localedir is None:
localedir = _default_localedir
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in _expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# select a language
if all:
result = []
else:
result = None
for lang in nelangs:
if lang == 'C':
break
mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
if os.path.exists(mofile):
if all:
result.append(mofile)
else:
return mofile
return result
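# For domain 'myapp' with LANGUAGE=de_DE.UTF-8, find() probes candidate
# paths in most- to least-specific order, e.g. (illustrative):
#
#     <localedir>/de_DE.UTF-8/LC_MESSAGES/myapp.mo
#     <localedir>/de_DE/LC_MESSAGES/myapp.mo
#     <localedir>/de.UTF-8/LC_MESSAGES/myapp.mo
#     <localedir>/de/LC_MESSAGES/myapp.mo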
# a mapping between absolute .mo file path and Translation object
_translations = {}
def translation(domain, localedir=None, languages=None,
class_=None, fallback=False, codeset=None):
if class_ is None:
class_ = GNUTranslations
mofiles = find(domain, localedir, languages, all=True)
if not mofiles:
if fallback:
return NullTranslations()
raise OSError(ENOENT, 'No translation file found for domain', domain)
# Avoid opening, reading, and parsing the .mo file after it's been done
# once.
result = None
for mofile in mofiles:
key = (class_, os.path.abspath(mofile))
t = _translations.get(key)
if t is None:
with open(mofile, 'rb') as fp:
t = _translations.setdefault(key, class_(fp))
# Copy the translation object to allow setting fallbacks and
# output charset. All other instance data is shared with the
# cached object.
t = copy.copy(t)
if codeset:
t.set_output_charset(codeset)
if result is None:
result = t
else:
result.add_fallback(t)
return result
def install(domain, localedir=None, codeset=None, names=None):
t = translation(domain, localedir, fallback=True, codeset=codeset)
t.install(names)
# a mapping between domains and locale directories
_localedirs = {}
# a mapping between domains and codesets
_localecodesets = {}
# current global domain, `messages' used for compatibility with GNU gettext
_current_domain = 'messages'
def textdomain(domain=None):
global _current_domain
if domain is not None:
_current_domain = domain
return _current_domain
def bindtextdomain(domain, localedir=None):
global _localedirs
if localedir is not None:
_localedirs[domain] = localedir
return _localedirs.get(domain, _default_localedir)
def bind_textdomain_codeset(domain, codeset=None):
global _localecodesets
if codeset is not None:
_localecodesets[domain] = codeset
return _localecodesets.get(domain)
def dgettext(domain, message):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
return message
return t.gettext(message)
def ldgettext(domain, message):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
return message
return t.lgettext(message)
def dngettext(domain, msgid1, msgid2, n):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
if n == 1:
return msgid1
else:
return msgid2
return t.ngettext(msgid1, msgid2, n)
def ldngettext(domain, msgid1, msgid2, n):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
if n == 1:
return msgid1
else:
return msgid2
return t.lngettext(msgid1, msgid2, n)
def gettext(message):
return dgettext(_current_domain, message)
def lgettext(message):
return ldgettext(_current_domain, message)
def ngettext(msgid1, msgid2, n):
return dngettext(_current_domain, msgid1, msgid2, n)
def lngettext(msgid1, msgid2, n):
return ldngettext(_current_domain, msgid1, msgid2, n)
# dcgettext() has been deemed unnecessary and is not implemented.
# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
# was:
#
# import gettext
# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
# _ = cat.gettext
# print _('Hello World')
# The resulting catalog object currently doesn't support access through a
# dictionary API, which was supported (but apparently unused) in GNOME
# gettext.
Catalog = translation
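# A minimal end-to-end sketch (illustrative; 'myapp' and the 'locale'
# directory are hypothetical names, not part of this module). With no
# compiled .mo catalog on disk, fallback=True yields a NullTranslations
# object and messages pass through untranslated.
if __name__ == '__main__':
    _demo = translation('myapp', localedir='locale',
                        languages=['de'], fallback=True)
    _demo.install()
    print(_('Hello World'))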
# === fernandezcuesta/ansible :: test/units/modules/network/netscaler/test_netscaler_lb_vserver.py (gpl-3.0) ===
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from .netscaler_module import TestModule, nitro_base_patcher, set_module_args
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerLBVServerModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
cls.server_mock = MagicMock()
cls.server_mock.__class__ = MagicMock(add=Mock())
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver.lbvserver': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.lbvserver_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding.lbvserver_servicegroup_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding.sslvserver_sslcertkey_binding': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_lb_vserver
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_lb_vserver.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_lb_vserver.nitro_exception', MockException):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
nitro_exception=self.MockException,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
lb_vserver_proxy_mock = Mock()
feature_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[True, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=feature_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
feature_mock.assert_called_with(client_mock, 'LB')
def test_ensure_feature_is_enabled_nitro_exception_caught(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
lb_vserver_proxy_mock = Mock()
errorcode = 10
message = 'mock error'
class MockException(Exception):
def __init__(self):
self.errorcode = errorcode
self.message = message
feature_mock = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[True, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=feature_mock,
nitro_exception=MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
expected_msg = 'nitro exception errorcode=%s, message=%s' % (errorcode, message)
self.assertEqual(result['msg'], expected_msg, 'Failed to handle nitro exception')
def test_create_new_lb_vserver_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=Mock()),
lb_vserver_exists=Mock(side_effect=[False, True]),
lb_vserver_identical=Mock(side_effect=[True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
do_state_change=Mock(return_value=Mock(errorcode=0)),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'])
def test_update_lb_vserver_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=Mock()),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=Mock(return_value=[]),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'])
def test_service_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
configured_dict = {
'first': Mock(),
'second': Mock(has_equal_attributes=Mock(return_value=False)),
}
actual_dict = {
'second': Mock(),
'third': Mock(),
}
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[False, True]),
get_configured_service_bindings=Mock(return_value=configured_dict),
get_actual_service_bindings=Mock(return_value=actual_dict),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
configured_dict['first'].assert_has_calls([call.add()])
configured_dict['second'].assert_has_calls([call.has_equal_attributes(actual_dict['second']), call.add()])
actual_dict['second'].assert_has_calls([call.delete(client_mock, actual_dict['second'])])
actual_dict['third'].assert_has_calls([call.delete(client_mock, actual_dict['third'])])
self.assertTrue(result['changed'])
def test_servicegroup_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
configured_dict = {
'first': Mock(),
'second': Mock(has_equal_attributes=Mock(return_value=False)),
}
actual_dict = {
'second': Mock(),
'third': Mock(),
}
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
get_configured_servicegroup_bindings=Mock(return_value=configured_dict),
get_actual_servicegroup_bindings=Mock(return_value=actual_dict),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
configured_dict['first'].assert_has_calls([call.add()])
configured_dict['second'].assert_has_calls([call.has_equal_attributes(actual_dict['second']), call.add()])
actual_dict['second'].assert_has_calls([call.delete(client_mock, actual_dict['second'])])
actual_dict['third'].assert_has_calls([call.delete(client_mock, actual_dict['third'])])
self.assertTrue(result['changed'])
def test_ssl_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
servicetype='SSL',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.exited()
self.assertTrue(len(ssl_sync_mock.mock_calls) > 0, msg='ssl cert_key bindings not called')
self.assertTrue(result['changed'])
def test_ssl_bindings_not_called_for_non_ssl_service(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
servicetype='HTTP',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
ssl_sync_mock.assert_not_called()
self.assertTrue(result['changed'])
def test_server_exists_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[False, False]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'Did not create lb vserver')
def test_server_identical_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, False]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'lb vserver is not configured correctly')
def test_service_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[False, False]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'service bindings are not identical')
def test_servicegroup_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, False]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'servicegroup bindings are not identical')
def test_server_servicegroup_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, False]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'servicegroup bindings are not identical')
def test_absent_state_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, False]),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'])
def test_absent_state_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
lb_vserver_proxy_mock.assert_has_calls([call.delete()])
self.assertEqual(result['msg'], 'lb vserver still exists')
def test_disabled_state_change_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
do_state_change_mock = Mock(return_value=Mock(errorcode=0))
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, True]),
do_state_change=do_state_change_mock,
):
self.module = netscaler_lb_vserver
self.exited()
self.assertTrue(len(do_state_change_mock.mock_calls) > 0, msg='Did not call state change')
def test_get_immutables_failure(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
m = Mock(return_value=['some'])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False]),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=m,
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(
result['msg'].startswith('Cannot update immutable attributes'),
msg='Did not handle immutables error correctly',
)
# === fmacias64/keras :: tests/auto/test_graph_model.py (mit) ===
from __future__ import print_function
import unittest
import numpy as np
np.random.seed(1337)
from keras.models import Graph, Sequential
from keras.layers import containers
from keras.layers.core import Dense, Activation
from keras.utils.test_utils import get_test_data
X = np.random.random((100, 32))
X2 = np.random.random((100, 32))
y = np.random.random((100, 4))
y2 = np.random.random((100,))
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,),
classification=False, output_shape=(4,))
(X2_train, y2_train), (X2_test, y2_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,),
classification=False, output_shape=(1,))
class TestGraph(unittest.TestCase):
def test_1o_1i(self):
print('test a non-sequential graph with 1 input and 1 output')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_test})
        assert(isinstance(out, dict))
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test})
print(loss)
assert(loss < 2.5)
def test_1o_1i_2(self):
print('test a more complex non-sequential graph with 1 input and 1 output')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2-0', input='input1')
graph.add_node(Activation('relu'), name='dense2', input='dense2-0')
graph.add_node(Dense(4, 16), name='dense3', input='dense2')
graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_train})
        assert(isinstance(out, dict))
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test})
print(loss)
assert(loss < 2.5)
graph.get_config(verbose=1)
def test_1o_2i(self):
print('test a non-sequential graph with 2 inputs and 1 output')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input2')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'input2': X2_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_test, 'input2': X2_test})
        assert(isinstance(out, dict))
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'input2': X2_test, 'output1': y_test})
print(loss)
assert(loss < 3.0)
graph.get_config(verbose=1)
def test_2o_1i_weights(self):
print('test a non-sequential graph with 1 input and 2 outputs')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 1), name='dense3', input='dense1')
graph.add_output(name='output1', input='dense2')
graph.add_output(name='output2', input='dense3')
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10)
out = graph.predict({'input1': X_test})
        assert(isinstance(out, dict))
assert(len(out) == 2)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
print(loss)
assert(loss < 4.)
print('test weight saving')
graph.save_weights('temp.h5', overwrite=True)
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 1), name='dense3', input='dense1')
graph.add_output(name='output1', input='dense2')
graph.add_output(name='output2', input='dense3')
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
graph.load_weights('temp.h5')
nloss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
print(nloss)
assert(loss == nloss)
def test_2o_1i_sample_weights(self):
print('test a non-sequential graph with 1 input and 2 outputs with sample weights')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 1), name='dense3', input='dense1')
graph.add_output(name='output1', input='dense2')
graph.add_output(name='output2', input='dense3')
weights1 = np.random.uniform(size=y_train.shape[0])
weights2 = np.random.uniform(size=y2_train.shape[0])
weights1_test = np.random.uniform(size=y_test.shape[0])
weights2_test = np.random.uniform(size=y2_test.shape[0])
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10,
sample_weight={'output1': weights1, 'output2': weights2})
out = graph.predict({'input1': X_test})
        assert(isinstance(out, dict))
assert(len(out) == 2)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test},
sample_weight={'output1': weights1_test, 'output2': weights2_test})
loss = graph.train_on_batch({'input1': X_train, 'output1': y_train, 'output2': y2_train},
sample_weight={'output1': weights1, 'output2': weights2})
loss = graph.evaluate({'input1': X_train, 'output1': y_train, 'output2': y2_train},
sample_weight={'output1': weights1, 'output2': weights2})
print(loss)
def test_recursive(self):
print('test layer-like API')
graph = containers.Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
seq = Sequential()
seq.add(Dense(32, 32, name='first_seq_dense'))
seq.add(graph)
seq.add(Dense(4, 4, name='last_seq_dense'))
seq.compile('rmsprop', 'mse')
history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
loss = seq.evaluate(X_test, y_test)
print(loss)
assert(loss < 2.5)
loss = seq.evaluate(X_test, y_test, show_accuracy=True)
pred = seq.predict(X_test)
seq.get_config(verbose=1)
def test_create_output(self):
print('test create_output argument')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Dense(4, 4), name='output1', inputs=['dense2', 'dense3'], merge_mode='sum', create_output=True)
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_test})
        assert(isinstance(out, dict))
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test})
print(loss)
assert(loss < 2.5)
def test_count_params(self):
print('test count params')
nb_units = 100
nb_classes = 2
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=2)
graph.add_node(Dense(nb_units, nb_units),
name='dense1', input='input1')
graph.add_node(Dense(nb_units, nb_classes),
name='dense2', input='input2')
graph.add_node(Dense(nb_units, nb_classes),
name='dense3', input='dense1')
graph.add_output(name='output', inputs=['dense2', 'dense3'],
merge_mode='sum')
n = nb_units * nb_units + nb_units
n += nb_units * nb_classes + nb_classes
n += nb_units * nb_classes + nb_classes
self.assertEqual(n, graph.count_params())
graph.compile('rmsprop', {'output': 'binary_crossentropy'})
self.assertEqual(n, graph.count_params())
if __name__ == '__main__':
print('Test graph model')
unittest.main()
# === pombredanne/rekall :: rekall-core/rekall/addrspace_test.py (gpl-2.0) ===
import unittest

from rekall import addrspace
from rekall import obj
from rekall import testlib
from rekall import session
class CustomRunsAddressSpace(addrspace.RunBasedAddressSpace):
def __init__(self, runs=None, data=None, **kwargs):
super(CustomRunsAddressSpace, self).__init__(**kwargs)
self.base = addrspace.BufferAddressSpace(data=data,
session=self.session)
for i in runs:
self.runs.insert(i)
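# Each run above is a (virtual_address, file_offset, length) tuple, so
# (1000, 0, 1) maps the single byte at address 1000 to offset 0 of the
# backing buffer and (1001, 1, 9) maps addresses 1001-1009 to offsets 1-9
# (an interpretation confirmed by the reads exercised in the tests below).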
class RunBasedTest(testlib.RekallBaseUnitTestCase):
"""Test the RunBasedAddressSpace implementation."""
def setUp(self):
self.session = session.Session()
self.contiguous_as = CustomRunsAddressSpace(session=self.session,
runs = [(1000, 0, 1), (1001, 1, 9)],
data="0123456789")
self.discontiguous_as = CustomRunsAddressSpace(session=self.session,
runs=[(1000, 0, 1), (1020, 1, 9)],
data="0123456789")
def testDiscontiguousRunsRead(self):
# Read from an address without data
self.assertEqual(self.discontiguous_as.read(0, 20),
"\x00" * 20)
# Read spanning two runs
self.assertEqual(self.discontiguous_as.read(1000, 30),
"0" + "\x00"*19 + "123456789" + "\x00")
# Read in the middle of a run
self.assertEqual(self.discontiguous_as.read(1025, 10),
"6789" + "\x00" * 6)
# Read past the end
self.assertEqual(self.discontiguous_as.read(2000, 10),
"\x00" * 10)
def testContiguousRunsRead(self):
# Read from an address without data
self.assertEqual(self.contiguous_as.read(0, 20),
"\x00" * 20)
# Read spanning two runs
self.assertEqual(self.contiguous_as.read(1000, 30),
"0123456789" + "\x00"*20)
# Read in the middle of a run
self.assertEqual(self.contiguous_as.read(1005, 10),
"56789" + "\x00" * 5)
# Read past the end
self.assertEqual(self.contiguous_as.read(2000, 10),
"\x00" * 10)
if __name__ == "__main__":
unittest.main()
# === tux-00/ansible :: lib/ansible/modules/system/mount.py (gpl-3.0) ===
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Red Hat, inc
# Written by Seth Vidal
# based on the mount modules from salt and puppet
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: mount
short_description: Control active and configured mount points
description:
- This module controls active and configured mount points in C(/etc/fstab).
author:
- Ansible Core Team
- Seth Vidal
version_added: "0.6"
options:
path:
description:
- Path to the mount point (e.g. C(/mnt/files)).
- Before 2.3 this option was only usable as I(dest), I(destfile) and
I(name).
required: true
aliases: [ name ]
src:
description:
- Device to be mounted on I(path). Required when I(state) set to
C(present) or C(mounted).
required: false
default: null
fstype:
description:
- Filesystem type. Required when I(state) is C(present) or C(mounted).
required: false
default: null
opts:
description:
- Mount options (see fstab(5), or vfstab(4) on Solaris).
required: false
default: null
dump:
description:
- Dump (see fstab(5)). Note that if set to C(null) and I(state) set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Has no effect on Solaris systems.
required: false
default: 0
passno:
description:
- Passno (see fstab(5)). Note that if set to C(null) and I(state) set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Deprecated on Solaris systems.
required: false
default: 0
state:
description:
- If C(mounted) or C(unmounted), the device will be actively mounted or
unmounted as needed and appropriately configured in I(fstab).
- C(absent) and C(present) only deal with I(fstab) but will not affect
current mounting.
- If specifying C(mounted) and the mount point is not present, the mount
point will be created.
- Similarly, specifying C(absent) will remove the mount point directory.
required: true
choices: ["present", "absent", "mounted", "unmounted"]
fstab:
description:
- File to use instead of C(/etc/fstab). You shouldn't use this option
unless you really know what you are doing. This might be useful if
you need to configure mountpoints in a chroot environment. OpenBSD
does not allow specifying alternate fstab files with mount so do not
use this on OpenBSD with any state that operates on the live
filesystem.
required: false
default: /etc/fstab (/etc/vfstab on Solaris)
boot:
version_added: 2.2
description:
- Determines if the filesystem should be mounted on boot.
- Only applies to Solaris systems.
required: false
default: yes
choices: ["yes", "no"]
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as
default, but I(name) still works as well.
'''
EXAMPLES = '''
# Before 2.3, option 'name' was used instead of 'path'
- name: Mount DVD read-only
mount:
path: /mnt/dvd
src: /dev/sr0
fstype: iso9660
opts: ro
state: present
- name: Mount up device by label
mount:
path: /srv/disk
src: LABEL=SOME_LABEL
fstype: ext4
state: present
- name: Mount up device by UUID
mount:
path: /home
src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077
fstype: xfs
opts: noatime
state: present
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.ismount import ismount
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
def write_fstab(lines, path):
fs_w = open(path, 'w')
for l in lines:
fs_w.write(l)
fs_w.flush()
fs_w.close()
def _escape_fstab(v):
"""Escape invalid characters in fstab fields.
space (040)
ampersand (046)
backslash (134)
"""
if isinstance(v, int):
return v
else:
return(
v.
replace('\\', '\\134').
replace(' ', '\\040').
replace('&', '\\046'))
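# Escaping sketch (illustrative):
#
#     _escape_fstab('/mnt/my disk')   # -> '/mnt/my\\040disk'
#     # i.e. the space becomes the octal escape \040 expected by fstab(5)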
def set_mount(module, args):
"""Set/change a mount point location in fstab."""
to_write = []
exists = False
changed = False
escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
if get_platform() == 'SunOS':
new_line = (
'%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n')
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
# Check if we got a valid line for splitting
if (
get_platform() == 'SunOS' and len(line.split()) != 7 or
get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
continue
ld = {}
if get_platform() == 'SunOS':
(
ld['src'],
dash,
ld['name'],
ld['fstype'],
ld['passno'],
ld['boot'],
ld['opts']
) = line.split()
else:
(
ld['src'],
ld['name'],
ld['fstype'],
ld['opts'],
ld['dump'],
ld['passno']
) = line.split()
# Check if we found the correct line
if ld['name'] != escaped_args['name']:
to_write.append(line)
continue
# If we got here we found a match - let's check if there is any
# difference
exists = True
args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno')
if get_platform() == 'SunOS':
args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts')
for t in args_to_check:
if ld[t] != escaped_args[t]:
ld[t] = escaped_args[t]
changed = True
if changed:
to_write.append(new_line % ld)
else:
to_write.append(line)
if not exists:
to_write.append(new_line % escaped_args)
changed = True
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def unset_mount(module, args):
"""Remove a mount point from fstab."""
to_write = []
changed = False
escaped_name = _escape_fstab(args['name'])
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
# Check if we got a valid line for splitting
if (
get_platform() == 'SunOS' and len(line.split()) != 7 or
get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
continue
ld = {}
if get_platform() == 'SunOS':
(
ld['src'],
dash,
ld['name'],
ld['fstype'],
ld['passno'],
ld['boot'],
ld['opts']
) = line.split()
else:
(
ld['src'],
ld['name'],
ld['fstype'],
ld['opts'],
ld['dump'],
ld['passno']
) = line.split()
if ld['name'] != escaped_name:
to_write.append(line)
continue
# If we got here we found a match - continue and mark changed
changed = True
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def _set_fstab_args(fstab_file):
result = []
if fstab_file and fstab_file != '/etc/fstab':
if get_platform().lower().endswith('bsd'):
result.append('-F')
else:
result.append('-T')
result.append(fstab_file)
return result
def mount(module, args):
"""Mount up a path or remount if needed."""
mount_bin = module.get_bin_path('mount', required=True)
name = args['name']
cmd = [mount_bin]
if get_platform().lower() == 'openbsd':
# Use module.params['fstab'] here as args['fstab'] has been set to the
# default value.
if module.params['fstab'] is not None:
module.fail_json(
msg=(
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
cmd += [name]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out+err
def umount(module, path):
"""Unmount a path."""
umount_bin = module.get_bin_path('umount', required=True)
cmd = [umount_bin, path]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out+err
def remount(module, args):
"""Try to use 'remount' first and fallback to (u)mount if unsupported."""
mount_bin = module.get_bin_path('mount', required=True)
cmd = [mount_bin]
# Multiplatform remount opts
if get_platform().lower().endswith('bsd'):
cmd += ['-u']
else:
cmd += ['-o', 'remount']
if get_platform().lower() == 'openbsd':
# Use module.params['fstab'] here as args['fstab'] has been set to the
# default value.
if module.params['fstab'] is not None:
module.fail_json(
msg=(
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
cmd += [args['name']]
out = err = ''
try:
if get_platform().lower().endswith('bsd'):
# Note: Forcing BSDs to do umount/mount due to BSD remount not
# working as expected (suspect bug in the BSD mount command)
# Interested contributor could rework this to use mount options on
# the CLI instead of relying on fstab
# https://github.com/ansible/ansible-modules-core/issues/5591
rc = 1
else:
rc, out, err = module.run_command(cmd)
    except Exception:
rc = 1
msg = ''
if rc != 0:
msg = out + err
rc, msg = umount(module, args['name'])
if rc == 0:
rc, msg = mount(module, args)
return rc, msg
# Note if we wanted to put this into module_utils we'd have to get permission
# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923
# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439
# and @abadger to relicense from GPLv3+
def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None):
"""Return whether the dest is bind mounted
:arg module: The AnsibleModule (used for helper functions)
:arg dest: The directory to be mounted under. This is the primary means
of identifying whether the destination is mounted.
:kwarg src: The source directory. If specified, this is used to help
ensure that we are detecting that the correct source is mounted there.
:kwarg fstype: The filesystem type. If specified this is also used to
help ensure that we are detecting the right mount.
:kwarg linux_mounts: Cached list of mounts for Linux.
:returns: True if the dest is mounted with src otherwise False.
"""
is_mounted = False
if get_platform() == 'Linux' and linux_mounts is not None:
if src is None:
# That's for unmounted/absent
for m in linux_mounts:
if m['dst'] == dest:
is_mounted = True
else:
mounted_src = None
for m in linux_mounts:
if m['dst'] == dest:
mounted_src = m['src']
# That's for mounted
if mounted_src is not None and mounted_src == src:
is_mounted = True
else:
bin_path = module.get_bin_path('mount', required=True)
cmd = '%s -l' % bin_path
rc, out, err = module.run_command(cmd)
mounts = []
if len(out):
mounts = to_native(out).strip().split('\n')
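        # The index-based parsing below assumes Linux-style `mount -l`
        # output, i.e. lines shaped like (illustrative):
        #   /dev/sda1 on /mnt type ext4 (rw,relatime)
        # so arguments[0] is the source, [2] the mount point and [4] the
        # filesystem type.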
for mnt in mounts:
arguments = mnt.split()
if (
(arguments[0] == src or src is None) and
arguments[2] == dest and
(arguments[4] == fstype or fstype is None)):
is_mounted = True
if is_mounted:
break
return is_mounted
def get_linux_mounts(module):
"""Gather mount information"""
mntinfo_file = "/proc/self/mountinfo"
try:
f = open(mntinfo_file)
except IOError:
return
lines = map(str.strip, f.readlines())
try:
f.close()
except IOError:
module.fail_json(msg="Cannot close file %s" % mntinfo_file)
mntinfo = []
for line in lines:
fields = line.split()
record = {
'id': int(fields[0]),
'parent_id': int(fields[1]),
'root': fields[3],
'dst': fields[4],
'opts': fields[5],
'fs': fields[-3],
'src': fields[-2]
}
mntinfo.append(record)
mounts = []
for mnt in mntinfo:
src = mnt['src']
if mnt['parent_id'] != 1:
# Find parent
for m in mntinfo:
if mnt['parent_id'] == m['id']:
if (
len(m['root']) > 1 and
mnt['root'].startswith("%s/" % m['root'])):
                        # Omit the parent's root in the child's root
# == Example:
# 204 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw
# 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw
# == Expected result:
# src=/tmp/aaa
mnt['root'] = mnt['root'][len(m['root']) + 1:]
# Prepend the parent's dst to the child's root
# == Example:
# 42 60 0:35 / /tmp rw - tmpfs tmpfs rw
# 78 42 0:35 /aaa /tmp/bbb rw - tmpfs tmpfs rw
# == Expected result:
# src=/tmp/aaa
if m['dst'] != '/':
mnt['root'] = "%s%s" % (m['dst'], mnt['root'])
src = mnt['root']
break
record = {
'dst': mnt['dst'],
'src': src,
'opts': mnt['opts'],
'fs': mnt['fs']
}
mounts.append(record)
return mounts
def main():
module = AnsibleModule(
argument_spec=dict(
boot=dict(default='yes', choices=['yes', 'no']),
dump=dict(),
fstab=dict(default=None),
fstype=dict(),
path=dict(required=True, aliases=['name'], type='path'),
opts=dict(),
passno=dict(type='str'),
src=dict(type='path'),
state=dict(
required=True,
choices=['present', 'absent', 'mounted', 'unmounted']),
),
supports_check_mode=True,
required_if=(
['state', 'mounted', ['src', 'fstype']],
['state', 'present', ['src', 'fstype']]
)
)
# solaris args:
# name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab
# linux args:
# name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
# Note: Do not modify module.params['fstab'] as we need to know if the user
# explicitly specified it in mount() and remount()
if get_platform().lower() == 'sunos':
args = dict(
name=module.params['path'],
opts='-',
passno='-',
fstab=module.params['fstab'],
boot='yes'
)
if args['fstab'] is None:
args['fstab'] = '/etc/vfstab'
else:
args = dict(
name=module.params['path'],
opts='defaults',
dump='0',
passno='0',
fstab=module.params['fstab']
)
if args['fstab'] is None:
args['fstab'] = '/etc/fstab'
# FreeBSD doesn't have any 'default' so set 'rw' instead
if get_platform() == 'FreeBSD':
args['opts'] = 'rw'
linux_mounts = []
    # Cache all mounts here so that we get consistent results if we need to
    # call is_bind_mounted() multiple times
if get_platform() == 'Linux':
linux_mounts = get_linux_mounts(module)
if linux_mounts is None:
args['warnings'] = (
'Cannot open file /proc/self/mountinfo. '
'Bind mounts might be misinterpreted.')
# Override defaults with user specified params
for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'):
if module.params[key] is not None:
args[key] = module.params[key]
# If fstab file does not exist, we first need to create it. This mainly
# happens when fstab option is passed to the module.
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
open(args['fstab'], 'a').close()
# absent:
# Remove from fstab and unmounted.
# unmounted:
# Do not change fstab state, but unmount.
# present:
# Add to fstab, do not change mount state.
# mounted:
# Add to fstab if not there and make sure it is mounted. If it has
# changed in fstab then remount it.
state = module.params['state']
name = module.params['path']
changed = False
if state == 'absent':
name, changed = unset_mount(module, args)
if changed and not module.check_mode:
if ismount(name) or is_bind_mounted(module, linux_mounts, name):
res, msg = umount(module, name)
if res:
module.fail_json(
msg="Error unmounting %s: %s" % (name, msg))
if os.path.exists(name):
try:
os.rmdir(name)
except (OSError, IOError):
e = get_exception()
module.fail_json(msg="Error rmdir %s: %s" % (name, str(e)))
elif state == 'unmounted':
if ismount(name) or is_bind_mounted(module, linux_mounts, name):
if not module.check_mode:
res, msg = umount(module, name)
if res:
module.fail_json(
msg="Error unmounting %s: %s" % (name, msg))
changed = True
elif state == 'mounted':
if not os.path.exists(name) and not module.check_mode:
try:
os.makedirs(name)
except (OSError, IOError):
e = get_exception()
module.fail_json(
msg="Error making dir %s: %s" % (name, str(e)))
name, changed = set_mount(module, args)
res = 0
if (
ismount(name) or
is_bind_mounted(
module, linux_mounts, name, args['src'], args['fstype'])):
if changed and not module.check_mode:
res, msg = remount(module, args)
changed = True
else:
changed = True
if not module.check_mode:
res, msg = mount(module, args)
if res:
module.fail_json(msg="Error mounting %s: %s" % (name, msg))
elif state == 'present':
name, changed = set_mount(module, args)
else:
module.fail_json(msg='Unexpected position reached')
module.exit_json(changed=changed, **args)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,339,664,479,489,836,000 | 28.297111 | 87 | 0.532983 | false |
erikdejonge/youtube-dl | youtube_dl/extractor/hidive.py | 23 | 4113 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
url_or_none,
urlencode_postdata,
)
class HiDiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<title>[^/]+)/(?P<key>[^/?#&]+)'
# Using X-Forwarded-For results in 403 HTTP error for HLS fragments,
# so disabling geo bypass completely
_GEO_BYPASS = False
_NETRC_MACHINE = 'hidive'
_LOGIN_URL = 'https://www.hidive.com/account/login'
_TESTS = [{
'url': 'https://www.hidive.com/stream/the-comic-artist-and-his-assistants/s01e001',
'info_dict': {
'id': 'the-comic-artist-and-his-assistants/s01e001',
'ext': 'mp4',
'title': 'the-comic-artist-and-his-assistants/s01e001',
'series': 'the-comic-artist-and-his-assistants',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'skip': 'Requires Authentication',
}]
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
webpage = self._download_webpage(self._LOGIN_URL, None)
form = self._search_regex(
r'(?s)<form[^>]+action="/account/login"[^>]*>(.+?)</form>',
webpage, 'login form')
data = self._hidden_inputs(form)
data.update({
'Email': email,
'Password': password,
})
self._download_webpage(
self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title, key = mobj.group('title', 'key')
video_id = '%s/%s' % (title, key)
settings = self._download_json(
'https://www.hidive.com/play/settings', video_id,
data=urlencode_postdata({
'Title': title,
'Key': key,
'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
}))
restriction = settings.get('restrictionReason')
if restriction == 'RegionRestricted':
self.raise_geo_restricted()
if restriction and restriction != 'None':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, restriction), expected=True)
formats = []
subtitles = {}
for rendition_id, rendition in settings['renditions'].items():
bitrates = rendition.get('bitrates')
if not isinstance(bitrates, dict):
continue
m3u8_url = url_or_none(bitrates.get('hls'))
if not m3u8_url:
continue
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='%s-hls' % rendition_id, fatal=False))
cc_files = rendition.get('ccFiles')
if not isinstance(cc_files, list):
continue
for cc_file in cc_files:
if not isinstance(cc_file, list) or len(cc_file) < 3:
continue
cc_lang = cc_file[0]
cc_url = url_or_none(cc_file[2])
if not isinstance(cc_lang, compat_str) or not cc_url:
continue
subtitles.setdefault(cc_lang, []).append({
'url': cc_url,
})
self._sort_formats(formats)
season_number = int_or_none(self._search_regex(
r's(\d+)', key, 'season number', default=None))
episode_number = int_or_none(self._search_regex(
r'e(\d+)', key, 'episode number', default=None))
return {
'id': video_id,
'title': video_id,
'subtitles': subtitles,
'formats': formats,
'series': title,
'season_number': season_number,
'episode_number': episode_number,
}
| unlicense | -1,835,148,568,576,254,000 | 33.855932 | 92 | 0.529784 | false |
secondscoin/secondscoin | contrib/pyminer/pyminer.py | 385 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
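# Illustrative examples (not part of the original miner) of the helpers
# above, which reorder getwork's big-endian-per-word byte layout:
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')
#       -> '\x04\x03\x02\x01\x08\x07\x06\x05'   (bytes swapped per 32-bit word)
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08')
#       -> '\x05\x06\x07\x08\x01\x02\x03\x04'   (32-bit word order reversed)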
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit | -1,945,500,815,334,753,800 | 24.531746 | 84 | 0.648896 | false |
stefanreuther/bob | test/test_input_recipeset.py | 1 | 41799 | # Bob build tool
# Copyright (C) 2016 Jan Klötzke
#
# SPDX-License-Identifier: GPL-3.0-or-later
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest import TestCase
from unittest.mock import Mock
import os
import textwrap
import yaml
from bob import DEBUG
from bob.input import RecipeSet
from bob.errors import ParseError, BobError
DEBUG['ngd'] = True
def pruneBuiltin(env):
return { k : v for k,v in env.items() if not k.startswith("BOB_") }
class RecipesTmp:
def setUp(self):
self.cwd = os.getcwd()
self.tmpdir = TemporaryDirectory()
os.chdir(self.tmpdir.name)
os.mkdir("recipes")
os.mkdir("classes")
def tearDown(self):
self.tmpdir.cleanup()
os.chdir(self.cwd)
def writeRecipe(self, name, content, layer=[]):
path = os.path.join("",
*(os.path.join("layers", l) for l in layer),
"recipes")
if path: os.makedirs(path, exist_ok=True)
with open(os.path.join(path, name+".yaml"), "w") as f:
f.write(textwrap.dedent(content))
def writeClass(self, name, content, layer=[]):
path = os.path.join("",
*(os.path.join("layers", l) for l in layer),
"classes")
if path: os.makedirs(path, exist_ok=True)
with open(os.path.join(path, name+".yaml"), "w") as f:
f.write(textwrap.dedent(content))
def writeConfig(self, content, layer=[]):
path = os.path.join("", *(os.path.join("layers", l) for l in layer))
if path: os.makedirs(path, exist_ok=True)
with open(os.path.join(path, "config.yaml"), "w") as f:
f.write(yaml.dump(content))
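    # Illustrative layout produced by the write helpers above for
    # layer=["l1", "l2"] (example only): files land under
    # layers/l1/layers/l2/recipes/ and layers/l1/layers/l2/classes/,
    # i.e. sub-layers nest recursively.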
def generate(self, sandboxEnabled=False):
recipes = RecipeSet()
recipes.parse()
return recipes.generatePackages(lambda x,y: "unused",
sandboxEnabled=sandboxEnabled)
class TestUserConfig(TestCase):
def setUp(self):
self.cwd = os.getcwd()
def tearDown(self):
os.chdir(self.cwd)
def testEmptyTree(self):
"""Test parsing an empty receipe tree"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
recipeSet = RecipeSet()
recipeSet.parse()
def testDefaultEmpty(self):
"""Test parsing an empty default.yaml"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write(" ")
recipeSet = RecipeSet()
recipeSet.parse()
def testDefaultValidation(self):
"""Test that default.yaml is validated with a schema"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("wrongkey: foo\n")
recipeSet = RecipeSet()
self.assertRaises(ParseError, recipeSet.parse)
def testDefaultInclude(self):
"""Test parsing default.yaml including another file"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - user\n")
with open("user.yaml", "w") as f:
f.write("whitelist: [FOO]\n")
recipeSet = RecipeSet()
recipeSet.parse()
assert "FOO" in recipeSet.envWhiteList()
def testDefaultIncludeMissing(self):
"""Test that default.yaml can include missing files"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - user\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()), {})
def testDefaultIncludeOverrides(self):
"""Test that included files override settings of default.yaml"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - user\n")
f.write("environment:\n")
f.write(" FOO: BAR\n")
f.write(" BAR: BAZ\n")
with open("user.yaml", "w") as f:
f.write("environment:\n")
f.write(" FOO: BAZ\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{ "FOO":"BAZ", "BAR":"BAZ" })
def testUserConfigMissing(self):
"""Test that missing user config fails parsing"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
recipeSet = RecipeSet()
recipeSet.setConfigFiles(["user"])
self.assertRaises(ParseError, recipeSet.parse)
def testUserConfigOverrides(self):
"""Test that user configs override default.yaml w/ includes"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - included\n")
f.write("environment:\n")
f.write(" FOO: BAR\n")
with open("included.yaml", "w") as f:
f.write("environment:\n")
f.write(" FOO: BAZ\n")
with open("user.yaml", "w") as f:
f.write("environment:\n")
f.write(" FOO: USER\n")
recipeSet = RecipeSet()
recipeSet.setConfigFiles(["user"])
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{ "FOO":"USER"})
def testDefaultRequire(self):
"""Test parsing default.yaml requiring another file"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("require:\n")
f.write(" - user\n")
with open("user.yaml", "w") as f:
f.write("whitelist: [FOO]\n")
recipeSet = RecipeSet()
recipeSet.parse()
assert "FOO" in recipeSet.envWhiteList()
def testDefaultRequireMissing(self):
"""Test that default.yaml barfs on required missing files"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("require:\n")
f.write(" - user\n")
recipeSet = RecipeSet()
self.assertRaises(ParseError, recipeSet.parse)
def testDefaultRequireLowerPrecedence(self):
"""Test that 'require' has lower precedence than 'include'"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - higher\n")
f.write("require:\n")
f.write(" - lower\n")
f.write("environment:\n")
f.write(" FOO: default\n")
f.write(" BAR: default\n")
f.write(" BAZ: default\n")
with open("lower.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAR: lower\n")
f.write(" BAZ: lower\n")
with open("higher.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAZ: higher\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{'FOO' : 'default', 'BAR' : 'lower', 'BAZ' : 'higher' })
def testDefaultRelativeIncludes(self):
"""Test relative includes work"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
os.makedirs("some/sub/dirs")
os.makedirs("other/directories")
with open("config.yaml", "w") as f:
f.write("policies:\n")
f.write(" relativeIncludes: True\n")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - some/first\n")
f.write("require:\n")
f.write(" - other/second\n")
f.write("environment:\n")
f.write(" FOO: default\n")
f.write(" BAR: default\n")
f.write(" BAZ: default\n")
with open("other/second.yaml", "w") as f:
f.write('require: ["directories/lower"]')
with open("other/directories/lower.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAR: lower\n")
f.write(" BAZ: lower\n")
with open("some/first.yaml", "w") as f:
f.write('include: ["sub/dirs/higher"]')
with open("some/sub/dirs/higher.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAZ: higher\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{'FOO' : 'default', 'BAR' : 'lower', 'BAZ' : 'higher' })
class TestDependencies(RecipesTmp, TestCase):
def testDuplicateRemoval(self):
"""Test that provided dependencies do not replace real dependencies"""
self.writeRecipe("root", """\
root: True
depends: [a, b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("a", """\
depends: [b]
provideDeps: [b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
# make sure "b" is addressable
p = packages.walkPackagePath("root/b")
self.assertEqual(p.getName(), "b")
def testIncompatible(self):
"""Incompatible provided dependencies must raise an error"""
self.writeRecipe("root", """\
root: True
depends: [a, b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("a", """\
depends:
-
name: c
environment: { FOO: A }
provideDeps: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
depends:
-
name: c
environment: { FOO: B }
provideDeps: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("c", """\
buildVars: [FOO]
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
def testCyclic(self):
"""Cyclic dependencies must be detected during parsing"""
self.writeRecipe("a", """\
root: True
depends: [b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
depends: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("c", """\
depends: [a]
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
def testCyclicSpecial(self):
"""Make sure cycles are detected on common sub-trees too"""
self.writeRecipe("root1", """\
root: True
depends: [b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("root2", """\
root: True
depends:
- name: b
if: "${TERMINATE:-1}"
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
environment:
TERMINATE: "0"
depends: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("c", """\
depends: [root2]
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
def testIncompatibleNamedTwice(self):
"""Test that it is impossible to name the same dependency twice with
different variants."""
self.writeRecipe("root", """\
multiPackage:
"":
root: True
depends:
- name: root-lib
environment:
FOO: bar
- name: root-lib
use: [tools]
environment:
FOO: baz
buildScript: "true"
packageScript: "true"
lib:
packageVars: [FOO]
packageScript: "true"
provideTools:
t: "."
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
class TestNetAccess(RecipesTmp, TestCase):
def testOldPolicy(self):
"""Test that network access is enbled by default for old projects"""
self.writeRecipe("root", """\
root: True
""")
p = self.generate().walkPackagePath("root")
self.assertTrue(p.getBuildStep().hasNetAccess())
self.assertTrue(p.getPackageStep().hasNetAccess())
def testNewPolicy(self):
"""Test that network access is disabled by default"""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root", """\
root: True
""")
p = self.generate().walkPackagePath("root")
self.assertFalse(p.getBuildStep().hasNetAccess())
self.assertFalse(p.getPackageStep().hasNetAccess())
def testBuildNetAccess(self):
"""Test that a recipe can request network access for build step"""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root1", """\
root: True
buildNetAccess: True
buildScript: "true"
""")
self.writeRecipe("root2", """\
root: True
packageNetAccess: True
""")
packages = self.generate()
root1 = packages.walkPackagePath("root1")
self.assertTrue(root1.getBuildStep().hasNetAccess())
self.assertFalse(root1.getPackageStep().hasNetAccess())
root2 = packages.walkPackagePath("root2")
self.assertFalse(root2.getBuildStep().hasNetAccess())
self.assertTrue(root2.getPackageStep().hasNetAccess())
def testToolAccessBuild(self):
"""Test that a tool can force network access for build step."""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
buildTools: [compiler]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("tool", """\
provideTools:
compiler:
path: "."
netAccess: True
""")
p = self.generate().walkPackagePath("root")
self.assertTrue(p.getBuildStep().hasNetAccess())
self.assertTrue(p.getPackageStep().hasNetAccess())
def testToolAccessPackage(self):
"""Test that a tool can force network access for package step."""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
buildScript: "true"
packageTools: [compiler]
packageScript: "true"
""")
self.writeRecipe("tool", """\
provideTools:
compiler:
path: "."
netAccess: True
""")
p = self.generate().walkPackagePath("root")
self.assertFalse(p.getBuildStep().hasNetAccess())
self.assertTrue(p.getPackageStep().hasNetAccess())
class TestToolEnvironment(RecipesTmp, TestCase):
def testEnvDefine(self):
"""Test that a tool can set environment."""
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
environment:
FOO: unset
BAR: unset
packageTools: [compiler]
packageVars: [FOO, BAR]
packageScript: "true"
""")
self.writeRecipe("tool", """\
environment:
LOCAL: "foo"
provideTools:
compiler:
path: "."
environment:
FOO: "${LOCAL}"
BAR: "bar"
""")
p = self.generate().walkPackagePath("root")
self.assertEqual(p.getPackageStep().getEnv(),
{"FOO":"foo", "BAR":"bar"})
def testEnvCollides(self):
"""Test that colliding tool environment definitions are detected."""
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
packageTools: [t1, t2]
packageScript: "true"
""")
self.writeRecipe("tool", """\
provideTools:
t1:
path: "."
environment:
FOO: "foo"
BAR: "bar"
t2:
path: "."
environment:
BAR: "bar"
BAZ: "baz"
""")
packages = self.generate()
self.assertRaises(ParseError, packages.getRootPackage)
class TestFingerprints(RecipesTmp, TestCase):
"""Test fingerprint impact.
Everything is done with sandbox. Without sandbox the handling moves to the
build-id that is implemented in the build backend. This should be covered
by the 'fingerprints' black box test.
"""
def setUp(self):
super().setUp()
self.writeRecipe("sandbox", """\
provideSandbox:
paths: ["/"]
""")
def testCheckoutNotFingerprinted(self):
"""Checkout steps are independent of fingerprints"""
self.writeRecipe("root", """\
root: True
depends:
- name: sandbox
use: [sandbox]
checkoutScript: "true"
buildScript: "true"
packageScript: "true"
multiPackage:
"1": { }
"2":
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-1")
r2 = packages.walkPackagePath("root-2")
self.assertEqual(r1.getCheckoutStep().getVariantId(),
r2.getCheckoutStep().getVariantId())
self.assertNotEqual(r1.getBuildStep().getVariantId(),
r2.getBuildStep().getVariantId())
self.assertNotEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
    def testCheckoutToolFingerprintIndependent(self):
        """Checkout steps are not influenced by tool fingerprint scripts.
        The build and package steps, however, must still be affected.
"""
common = textwrap.dedent("""\
root: True
depends:
- name: sandbox
use: [sandbox]
forward: True
- name: tool
use: [tools]
checkoutScript: "true"
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("root1", common + "checkoutTools: [plainTool]\n")
self.writeRecipe("root2", common + "checkoutTools: [fingerprintedTool]\n")
self.writeRecipe("tool", """\
provideTools:
plainTool:
path: "."
fingerprintedTool:
path: "."
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root1")
r2 = packages.walkPackagePath("root2")
self.assertEqual(r1.getCheckoutStep().getVariantId(),
r2.getCheckoutStep().getVariantId())
self.assertNotEqual(r1.getBuildStep().getVariantId(),
r2.getBuildStep().getVariantId())
self.assertNotEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
def testResultTransitive(self):
"""Fingerprint is transitive when using a tainted result"""
self.writeRecipe("root", """\
root: True
depends:
- name: sandbox
use: [sandbox]
forward: True
buildScript: "true"
multiPackage:
clean:
depends:
- dep-clean
tainted:
depends:
- dep-tainted
""")
self.writeRecipe("dep", """\
packageScript: "true"
multiPackage:
clean: { }
tainted:
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-clean")
r2 = packages.walkPackagePath("root-tainted")
self.assertNotEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
def testToolNotTransitive(self):
"""Using a fingerprinted tool does not influence digest"""
self.writeRecipe("root", """\
root: True
depends:
- name: sandbox
use: [sandbox]
forward: True
buildTools: [ tool ]
buildScript: "true"
multiPackage:
clean:
depends:
- name: tools-clean
use: [tools]
tainted:
depends:
- name: tools-tainted
use: [tools]
""")
self.writeRecipe("tools", """\
packageScript: "true"
provideTools:
tool: "."
multiPackage:
clean: { }
tainted:
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-clean")
r2 = packages.walkPackagePath("root-tainted")
self.assertEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
self.assertFalse(packages.walkPackagePath("root-clean/tools-clean")
.getPackageStep()._isFingerprinted())
self.assertTrue(packages.walkPackagePath("root-tainted/tools-tainted")
.getPackageStep()._isFingerprinted())
def testSandboxNotTransitive(self):
"""Using a fingerprinted sandbox does not influence digest"""
self.writeRecipe("root", """\
root: True
multiPackage:
clean:
depends:
- name: sandbox-clean
use: [tools]
tainted:
depends:
- name: sandbox-tainted
use: [tools]
""")
self.writeRecipe("sandbox", """\
packageScript: "true"
provideSandbox:
paths: ["/"]
multiPackage:
clean: { }
tainted:
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-clean")
r2 = packages.walkPackagePath("root-tainted")
self.assertEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
self.assertFalse(packages.walkPackagePath("root-clean/sandbox-clean")
.getPackageStep()._isFingerprinted())
self.assertTrue(packages.walkPackagePath("root-tainted/sandbox-tainted")
.getPackageStep()._isFingerprinted())
def testByDefaultIncluded(self):
"""If no 'fingerprintIf' is given the 'fingerprintScript' must be evaluated.
Parsed without sandbox to make sure fingerprint scripts are considered.
"""
self.writeRecipe("root", """\
root: True
fingerprintScript: |
must-be-included
multiPackage:
clean: { }
tainted:
fingerprintScript: |
taint-script
fingerprintIf: True
""")
packages = self.generate()
ps = packages.walkPackagePath("root-clean").getPackageStep()
self.assertFalse(ps._isFingerprinted())
self.assertFalse("must-be-included" in ps._getFingerprintScript())
ps = packages.walkPackagePath("root-tainted").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertTrue("must-be-included" in ps._getFingerprintScript())
self.assertTrue("taint-script" in ps._getFingerprintScript())
def testToolCanEnable(self):
"""Tools must be able to amend and enable fingerprinting."""
self.writeRecipe("root", """\
root: True
depends:
- name: tools
use: [tools]
fingerprintIf: False
fingerprintScript: |
must-not-be-included
packageTools: [tool]
""")
self.writeRecipe("tools", """\
packageScript: "true"
provideTools:
tool:
path: "."
fingerprintScript: "tool-script"
fingerprintIf: True
""")
packages = self.generate()
ps = packages.walkPackagePath("root").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertFalse("must-not-be-included" in ps._getFingerprintScript())
self.assertTrue("tool-script" in ps._getFingerprintScript())
def testDisabledNotIncluded(self):
"""The 'fingerprintScript' must not be included if 'fingerprintIf' is False."""
self.writeClass("unspecified", """\
fingerprintScript: |
unspecified
""")
self.writeClass("static-disabled", """\
fingerprintIf: False
fingerprintScript: |
static-disabled
""")
self.writeClass("static-enabled", """\
fingerprintIf: True
fingerprintScript: |
static-enabled
""")
self.writeClass("dynamic", """\
fingerprintIf: "${ENABLE_FINGERPRINTING}"
fingerprintScript: |
dynamic
""")
self.writeRecipe("root", """\
root: True
inherit:
- unspecified
- static-disabled
- static-enabled
- dynamic
multiPackage:
dyn-enabled:
environment:
ENABLE_FINGERPRINTING: "true"
dyn-disabled:
environment:
ENABLE_FINGERPRINTING: "false"
""")
packages = self.generate()
ps = packages.walkPackagePath("root-dyn-enabled").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertTrue("unspecified" in ps._getFingerprintScript())
self.assertFalse("static-disabled" in ps._getFingerprintScript())
self.assertTrue("static-enabled" in ps._getFingerprintScript())
self.assertTrue("dynamic" in ps._getFingerprintScript())
ps = packages.walkPackagePath("root-dyn-disabled").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertTrue("unspecified" in ps._getFingerprintScript())
self.assertFalse("static-disabled" in ps._getFingerprintScript())
self.assertTrue("static-enabled" in ps._getFingerprintScript())
self.assertFalse("dynamic" in ps._getFingerprintScript())
class TestLayers(RecipesTmp, TestCase):
"""Test layer support.
Test the various properties of layers and their error handling.
"""
def setUp(self):
super().setUp()
self.writeConfig({
"bobMinimumVersion" : "0.15",
"layers" : [ "l1_n1", "l1_n2" ],
})
self.writeRecipe("root", """\
root: True
depends:
- foo
- bar
buildScript: "true"
packageScript: "true"
""")
self.writeConfig({
"bobMinimumVersion" : "0.15",
"layers" : [ "l2" ],
}, layer=["l1_n1"])
self.writeRecipe("foo", """\
depends:
- baz
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n1"])
self.writeRecipe("baz", """\
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n1", "l2"])
self.writeRecipe("bar", """\
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n2"])
def testRegular(self):
"""Test that layers can be parsed"""
self.generate()
def testRecipeObstruction(self):
"""Test that layers must not provide identical recipes"""
self.writeRecipe("foo", """\
depends:
- baz
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n2"])
self.assertRaises(ParseError, self.generate)
def testClassObstruction(self):
"""Test that layers must not provide identical classes"""
self.writeClass("c", "", layer=["l1_n1", "l2"])
self.writeClass("c", "", layer=["l1_n2"])
self.assertRaises(ParseError, self.generate)
def testMinimumVersion(self):
"""Test that (sub-)layers cannot request a higher minimum version"""
self.writeConfig({
"bobMinimumVersion" : "0.14",
"layers" : [ "l1_n1", "l1_n2" ],
})
self.assertRaises(ParseError, self.generate)
class TestIfExpression(RecipesTmp, TestCase):
""" Test if expressions """
def setUp(self):
super().setUp()
self.writeRecipe("root", """\
root: True
depends:
- if: !expr |
"${USE_DEPS}" == "1"
depends:
- bar-1
- name: bar-2
if: !expr |
"${BAR}" == "bar2"
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("bar", """\
multiPackage:
"1":
buildScript: "true"
"2":
buildScript: "true"
packageScript: "true"
""")
def testRegular(self):
"""Test that if expressions can be parsed"""
self.generate()
def testNested(self):
"""Test that nested if expressions are working"""
recipes = RecipeSet()
recipes.parse()
ps = recipes.generatePackages(lambda x,y: "unused",
envOverrides={"USE_DEPS" : "0", "BAR" : "bar2"})
self.assertRaises(BobError, ps.walkPackagePath, "root/bar-1")
self.assertRaises(BobError, ps.walkPackagePath, "root/bar-2")
ps = recipes.generatePackages(lambda x,y: "unused",
envOverrides={"USE_DEPS" : "1"})
ps.walkPackagePath("root/bar-1")
self.assertRaises(BobError, ps.walkPackagePath, "root/bar-2")
ps = recipes.generatePackages(lambda x,y: "unused",
envOverrides={"USE_DEPS" : "1", "BAR" : "bar2"})
ps.walkPackagePath("root/bar-1")
ps.walkPackagePath("root/bar-2")
class TestNoUndefinedToolsPolicy(RecipesTmp, TestCase):
""" Test behaviour of noUndefinedTools policy"""
def setUp(self):
super().setUp()
self.writeRecipe("root", """\
root: True
packageTools: ["undefined"]
packageScript: "true"
""")
def testOldBehaviour(self):
"""Test that undefined tools are permissable on old policy setting.
The tool is silently ignored and dropped.
"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "noUndefinedTools" : False },
})
packages = self.generate()
ps = packages.walkPackagePath("root").getPackageStep()
self.assertEqual(list(ps.getTools().keys()), [])
def testNewBehaviour(self):
"""Test that undefined tools generate a parsing error on new policy setting"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "noUndefinedTools" : True },
})
with self.assertRaises(ParseError):
packages = self.generate()
packages.walkPackagePath("root").getPackageStep()
class TestToolsWeak(RecipesTmp, TestCase):
"""Test behaviour or weak tools"""
def setUp(self):
super().setUp()
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "noUndefinedTools" : False },
})
self.writeRecipe("tool", """\
multiPackage:
"1":
provideTools:
tool: "."
packageScript: "foo"
"2":
provideTools:
tool: "."
packageScript: "bat"
""")
def testWeak(self):
"""Weak tools have no impact on package"""
self.writeRecipe("r1", """\
root: True
depends:
- name: tool-1
use: [tools]
packageToolsWeak: [tool]
""")
self.writeRecipe("r2", """\
root: True
depends:
- name: tool-2
use: [tools]
packageToolsWeak: [tool]
""")
packages = self.generate()
r1 = packages.walkPackagePath("r1").getPackageStep()
r2 = packages.walkPackagePath("r2").getPackageStep()
self.assertEqual(r1.getVariantId(), r2.getVariantId())
self.assertNotEqual(r1.getTools()["tool"].getStep().getVariantId(),
r2.getTools()["tool"].getStep().getVariantId())
def testWeakMissing(self):
"""Weak tools that are missing still make a difference"""
self.writeRecipe("r1", """\
root: True
depends:
- name: tool-1
use: [tools]
packageTools: [tool]
""")
self.writeRecipe("r2", """\
root: True
packageTools: [tool]
""")
packages = self.generate()
r1 = packages.walkPackagePath("r1").getPackageStep()
r2 = packages.walkPackagePath("r2").getPackageStep()
self.assertNotEqual(r1.getVariantId(), r2.getVariantId())
def testStrongOverride(self):
"""A weak and strong tool refence is treated as strong"""
self.writeRecipe("r1", """\
root: True
depends:
- name: tool-1
use: [tools]
packageTools: [tool]
packageToolsWeak: [tool]
""")
self.writeRecipe("r2", """\
root: True
depends:
- name: tool-2
use: [tools]
packageTools: [tool]
packageToolsWeak: [tool]
""")
packages = self.generate()
r1 = packages.walkPackagePath("r1").getPackageStep()
r2 = packages.walkPackagePath("r2").getPackageStep()
self.assertNotEqual(r1.getVariantId(), r2.getVariantId())
class TestScmIgnoreUserPolicy(RecipesTmp, TestCase):
""" Test behaviour of scmIgnoreUser policy"""
def setUp(self):
super().setUp()
self.writeRecipe("git", """\
root: True
buildScript: "true"
packageScript: "true"
multiPackage:
a:
checkoutSCM:
scm: git
url: [email protected]:path/to/repo.git
b:
checkoutSCM:
scm: git
url: [email protected]:path/to/repo.git
""")
self.writeRecipe("url", """\
root: True
buildScript: "true"
packageScript: "true"
multiPackage:
a:
checkoutSCM:
scm: url
url: https://[email protected]/file
b:
checkoutSCM:
scm: url
url: https://[email protected]/file
""")
def testOldBehaviour(self):
"""Test that user name of URL is part of the variantId"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "scmIgnoreUser" : False },
})
packages = self.generate()
git_a = packages.walkPackagePath("git-a").getPackageStep()
git_b = packages.walkPackagePath("git-b").getPackageStep()
self.assertNotEqual(git_a.getVariantId(), git_b.getVariantId())
url_a = packages.walkPackagePath("url-a").getPackageStep()
url_b = packages.walkPackagePath("url-b").getPackageStep()
self.assertNotEqual(url_a.getVariantId(), url_b.getVariantId())
def testNewBehaviour(self):
"""Test that user name in URL is not part of variantId on new policy setting"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "scmIgnoreUser" : True },
})
packages = self.generate()
git_a = packages.walkPackagePath("git-a").getPackageStep()
git_b = packages.walkPackagePath("git-b").getPackageStep()
self.assertEqual(git_a.getVariantId(), git_b.getVariantId())
url_a = packages.walkPackagePath("url-a").getPackageStep()
url_b = packages.walkPackagePath("url-b").getPackageStep()
self.assertEqual(url_a.getVariantId(), url_b.getVariantId())
class TestPruneImportScmPolicy(RecipesTmp, TestCase):
""" Test behaviour of pruneImportScm policy"""
def setUp(self):
super().setUp()
self.writeRecipe("root", """\
root: True
checkoutSCM:
scm: import
url: ./recipes
buildScript: "true"
packageScript: "true"
""")
def testOldBehaviour(self):
"""Test that prune was disabled in the past by default"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "pruneImportScm" : False },
})
pkg = self.generate().walkPackagePath("root")
self.assertFalse(pkg.getCheckoutStep().getScmList()[0].getProperties(False)["prune"])
def testNewBehaviour(self):
"""Test that prune is the new default"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "pruneImportScm" : True },
})
pkg = self.generate().walkPackagePath("root")
self.assertTrue(pkg.getCheckoutStep().getScmList()[0].getProperties(False)["prune"])
| gpl-3.0 | 4,505,859,482,286,768,000 | 32.817152 | 93 | 0.50646 | false |
usc-isi/essex-baremetal-support | nova/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py | 5 | 4484 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import select, Column, Integer, MetaData, Table
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
def upgrade_libvirt(instances, instance_types):
# Update instance_types first
tiny = None
for inst_type in instance_types.select().execute():
if inst_type['name'] == 'm1.tiny':
tiny = inst_type['id']
root_gb = 0
else:
root_gb = 10
instance_types.update()\
.values(root_gb=root_gb,
ephemeral_gb=inst_type['local_gb'])\
.where(instance_types.c.id == inst_type['id'])\
.execute()
# then update instances following same pattern
instances.update()\
.values(root_gb=10,
ephemeral_gb=instances.c.local_gb)\
.execute()
if tiny is not None:
instances.update()\
.values(root_gb=0,
ephemeral_gb=instances.c.local_gb)\
.where(instances.c.instance_type_id == tiny)\
.execute()
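# Illustrative effect of upgrade_libvirt (example values only): a flavor
# with local_gb=20 becomes root_gb=10, ephemeral_gb=20, while m1.tiny
# becomes root_gb=0, ephemeral_gb=20, since libvirt historically gave
# every flavor except m1.tiny a fixed 10G root disk.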
def upgrade_other(instances, instance_types):
for table in (instances, instance_types):
table.update().values(root_gb=table.c.local_gb,
ephemeral_gb=0).execute()
def check_instance_presence(migrate_engine, instances_table):
result = migrate_engine.execute(instances_table.select().limit(1))
return result.fetchone() is not None
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
data_present = check_instance_presence(migrate_engine, instances)
if data_present and not FLAGS.connection_type:
msg = ("Found instance records in database. You must specify "
"connection_type to run migration migration")
raise exception.Error(msg)
instance_types = Table('instance_types', meta, autoload=True)
for table in (instances, instance_types):
root_gb = Column('root_gb', Integer)
root_gb.create(table)
ephemeral_gb = Column('ephemeral_gb', Integer)
ephemeral_gb.create(table)
    # Since this migration is part of the work to get all drivers
    # working the same way, we need to treat the new root_gb and
    # ephemeral_gb columns differently depending on how the driver
    # implementation used to behave.
if FLAGS.connection_type == 'libvirt':
upgrade_libvirt(instances, instance_types)
else:
upgrade_other(instances, instance_types)
default_local_device = instances.c.default_local_device
default_local_device.alter(name='default_ephemeral_device')
for table in (instances, instance_types):
table.drop_column('local_gb')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instance_types = Table('instance_types', meta, autoload=True)
for table in (instances, instance_types):
local_gb = Column('local_gb', Integer)
local_gb.create(table)
try:
for table in (instances, instance_types):
if FLAGS.connection_type == 'libvirt':
column = table.c.ephemeral_gb
else:
column = table.c.root_gb
table.update().values(local_gb=column).execute()
except Exception:
for table in (instances, instance_types):
table.drop_column('local_gb')
raise
default_ephemeral_device = instances.c.default_ephemeral_device
default_ephemeral_device.alter(name='default_local_device')
for table in (instances, instance_types):
table.drop_column('root_gb')
table.drop_column('ephemeral_gb')
| apache-2.0 | 4,897,868,114,173,773,000 | 33.492308 | 78 | 0.636708 | false |
Antreasgr/ol3 | bin/check-whitespace.py | 29 | 1633 | import logging
import re
import sys
logging.basicConfig(format='%(asctime)s %(name)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger('check-whitespace')
CR_RE = re.compile(r'\r')
LEADING_WHITESPACE_RE = re.compile(r'\s+')
TRAILING_WHITESPACE_RE = re.compile(r'\s+\n\Z')
NO_NEWLINE_RE = re.compile(r'[^\n]\Z')
ALL_WHITESPACE_RE = re.compile(r'\s+\Z')
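# Illustrative matches (examples only): LEADING_WHITESPACE_RE catches a
# file starting with blanks, CR_RE catches DOS line endings,
# TRAILING_WHITESPACE_RE lines like 'foo \n', NO_NEWLINE_RE a final line
# missing '\n', and ALL_WHITESPACE_RE a blank line at end of file.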
def check_whitespace(*filenames):
errors = 0
for filename in sorted(filenames):
whitespace = False
for lineno, line in enumerate(open(filename, 'rU')):
if lineno == 0 and LEADING_WHITESPACE_RE.match(line):
logger.info('%s:%d: leading whitespace', filename, lineno + 1)
errors += 1
if CR_RE.search(line):
logger.info('%s:%d: carriage return character in line',
filename, lineno + 1)
errors += 1
if TRAILING_WHITESPACE_RE.search(line):
logger.info('%s:%d: trailing whitespace', filename, lineno + 1)
errors += 1
if NO_NEWLINE_RE.search(line):
logger.info('%s:%d: no newline at end of file', filename,
lineno + 1)
errors += 1
whitespace = ALL_WHITESPACE_RE.match(line)
if whitespace:
logger.info('%s: trailing whitespace at end of file', filename)
errors += 1
return errors
if __name__ == "__main__":
errors = check_whitespace(*sys.argv[1:])
if errors > 0:
logger.error('%d whitespace errors' % (errors,))
sys.exit(1)
| bsd-2-clause | -5,825,093,877,266,534,000 | 34.5 | 79 | 0.554195 | false |
stuyCTF/stuyCTF-Platform | api/api/common.py | 11 | 5349 | """ The common module contains general-purpose functions potentially used by multiple modules in the system."""
import uuid
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, InvalidName
from werkzeug.contrib.cache import SimpleCache
from voluptuous import Invalid, MultipleInvalid
from hashlib import md5
allowed_protocols = []
allowed_ports = []
cache = SimpleCache()
admin_emails = None
__connection = None
__client = None
mongo_addr = "127.0.0.1"
mongo_port = 27017
mongo_db_name = ""
external_client = None
def get_conn():
"""
Get a database connection
Ensures that only one global database connection exists per thread.
If the connection does not exist a new one is created and returned.
"""
if external_client is not None:
return external_client
global __client, __connection
if not __connection:
try:
__client = MongoClient(mongo_addr, mongo_port)
__connection = __client[mongo_db_name]
except ConnectionFailure:
raise SevereInternalException("Could not connect to mongo database {} at {}:{}".format(mongo_db_name, mongo_addr, mongo_port))
except InvalidName as error:
raise SevereInternalException("Database {} is invalid! - {}".format(mongo_db_name, error))
return __connection
def esc(s):
"""
Escapes a string to prevent html injection
Returns a string with special HTML characters replaced.
Used to sanitize output to prevent XSS. We looked at
alternatives but there wasn't anything of an appropriate
scope that we could find. In the long-term this should be
replaced with a proper sanitization function written by
someone else."""
return s\
.replace('&', '&')\
.replace('<', '<')\
.replace('>', '>')\
.replace('"', '"')\
.replace("'", ''')
def token():
"""
Generate a token; it should be random but does not necessarily have to be cryptographically secure. Speed is a priority.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hashes a string
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
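# For example, hash("password") returns the 32-character hex digest
# '5f4dcc3b5aa765d61d8327deb882cf99'.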
class APIException(Exception):
"""
Exception thrown by the API.
"""
data = {}
def WebSuccess(message=None, data=None):
"""
Successful web request wrapper.
"""
return {
"status": 1,
"message": message,
"data": data
}
def WebError(message=None, data=None):
"""
Unsuccessful web request wrapper.
"""
return {
"status": 0,
"message": message,
"data": data
}
class WebException(APIException):
"""
Errors that are thrown that need to be displayed to the end user.
"""
pass
class InternalException(APIException):
"""
Exceptions thrown by the API constituting mild errors.
"""
pass
class SevereInternalException(InternalException):
"""
Exceptions thrown by the API constituting critical errors.
"""
pass
def flat_multi(multidict):
"""
Flattens any single element lists in a multidict.
Args:
multidict: multidict to be flattened.
Returns:
Partially flattened database.
"""
flat = {}
for key, values in multidict.items():
flat[key] = values[0] if type(values) == list and len(values) == 1 \
else values
return flat
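# For example (illustrative input, such as parsed form data):
#   flat_multi({"team": ["spqr"], "members": ["alice", "bob"]})
# returns {"team": "spqr", "members": ["alice", "bob"]} -- only the
# single-element list is collapsed to its value.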
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our APIException
Args:
callback_tuples: each callback_tuple should contain (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Trys to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
WebException with the given msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid(msg)
except Exception:
raise WebException(msg)
return value
return v
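# Sketch of the intended wiring (the field name and predicate below are
# hypothetical): each tuple pairs an error message with the callbacks that
# must all pass for the value.
#
#   from voluptuous import Schema
#   user_schema = Schema({
#       "email": check(("Email must contain '@'", [lambda v: "@" in v]))
#   })
#   validate(user_schema, {"email": "[email protected]"})  # passes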
def validate(schema, data):
"""
A wrapper around the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
APIException with status 0 and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise APIException(0, None, error.msg)
def safe_fail(f, *args, **kwargs):
"""
Safely calls a function that can raise an APIException.
Args:
f: function to call
*args: positional arguments
**kwargs: keyword arguments
Returns:
The function result or None if an exception was raised.
"""
try:
return f(*args, **kwargs)
except APIException:
return None
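# For example, safe_fail(validate, user_schema, data) -- with the hypothetical
# user_schema above -- returns None instead of letting a WebException
# propagate to the caller.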
| mit | 4,714,867,876,233,966,000 | 23.424658 | 138 | 0.617312 | false |
liberation/sesql | sesql/management/commands/build_search_query_index.py | 1 | 4441 | # -*- coding: utf-8 -*-
# Copyright (c) Pilot Systems and Libération, 2011
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This should be run as a cron job to process search histories and compute stats
"""
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
class Command(BaseCommand):
help = """Build SearchQuery index"""
option_list = BaseCommand.option_list + (
make_option('-e','--erode',
action='store_true',
dest='erode',
help = 'tell if we must erode results or not'),
make_option('-f','--filter',
dest ='filter',
type='int',
default=config.HISTORY_DEFAULT_FILTER,
help = 'how many times a search must occur to be processed'))
def handle(self, *apps, **options):
self.process_hits(options['filter'])
if options['erode']:
self.erode()
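# erode() below applies an exponential moving average: with alpha set to
# config.HISTORY_ALPHA, pondered = alpha * pondered + (1 - alpha) * recent,
# so old search counts decay geometrically while the latest activity is
# folded in once per run.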
def erode(self):
for search_query in SearchQuery.objects.all():
search_query.pondered_search_nb = (config.HISTORY_ALPHA
* search_query.pondered_search_nb
+ (1-config.HISTORY_ALPHA)
* search_query.nb_recent_search)
search_query.nb_recent_search = 0
search_query.save()
def process_hits(self, filter_nb):
last_hits = SearchHit.objects.all()
processed_hits = []
for hit in last_hits:
query = hit.query
# blacklist
if query in config.HISTORY_BLACKLIST:
continue
if hit.nb_results < filter_nb:
SearchHitHistoric(query=hit.query,
nb_results=hit.nb_results,
date=hit.date).save()
hit.delete()
continue
# manual get_or_create
try:
search_query = SearchQuery.objects.get(query=query)
created = False
except SearchQuery.DoesNotExist:
search_query = SearchQuery(query=query)
created = True
# if it's a new one, initialize it
if created:
search_query.phonex = phonex(query)
# clean the query; the '_' char causes a buggy clean_query
query = query.replace('_', '')
lems = lemmatize(query.split())
clean_query = [lem for lem in lems if lem]
clean_query = ' '.join(clean_query)
clean_phonex = phonex(clean_query)
search_query.clean_query = clean_query
search_query.clean_phonex = clean_phonex
search_query.nb_total_search = 0
search_query.pondered_search_nb = 0
search_query.nb_recent_search = 0
search_query.nb_results = hit.nb_results
search_query.nb_total_search += 1
search_query.pondered_search_nb += 1
search_query.nb_recent_search += 1
weight = (search_query.pondered_search_nb * config.HISTORY_BETA +
search_query.nb_results * config.HISTORY_GAMMA)
search_query.weight = weight
search_query.save()
# we can now create SearchHitHistoric
SearchHitHistoric(query=hit.query,
nb_results=hit.nb_results,
date=hit.date).save()
hit.delete()
| gpl-2.0 | -3,469,848,174,415,488,500 | 33.6875 | 80 | 0.562838 | false |
Antiun/yelizariev-addons | web_sessions_management/main.py | 16 | 3811 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp.osv import fields, osv, orm
from datetime import date, datetime, time, timedelta
from openerp.addons.base.ir.ir_cron import _intervalTypes
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.http import request
from openerp.tools.translate import _
from openerp import http
_logger = logging.getLogger(__name__)
class Home_tkobr(openerp.addons.web.controllers.main.Home):
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
openerp.addons.web.controllers.main.ensure_db()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = request.session.authenticate(request.session.db,
request.params['login'], request.params['password'])
if uid is not False:
self.save_session(request.cr, uid, request.context)
return http.redirect_with_hash(redirect)
request.uid = old_uid
values['error'] = 'Login failed due to one of the following reasons:'
values['reason1'] = '- Wrong login/password'
values['reason2'] = '- User not allowed to have multiple logins'
values['reason3'] = '- User not allowed to login at this specific time or day'
return request.render('web.login', values)
def save_session(self, cr, uid, context=None):
if not request.uid:
request.uid = openerp.SUPERUSER_ID
sid = request.httprequest.session.sid
uid = request.httprequest.session.uid
session_obj = request.registry.get('ir.sessions')
user_obj = request.registry.get('res.users')
u_exp_date, seconds = user_obj.get_expiring_date(cr, request.uid,
uid, context)
return session_obj.create(cr, SUPERUSER_ID, {'user_id': uid,
'session_id': sid,
'expiration_seconds': seconds,
'date_login': fields.datetime.now(),
'date_last_activity': fields.datetime.now(),
'logged_in': True},
context=context)
| lgpl-3.0 | 5,462,133,416,806,512,000 | 40.423913 | 90 | 0.614537 | false |
hieupham007/Titanium_Mobile | apidoc/generators/jsduck_generator.py | 1 | 19686 | #!/usr/bin/env python
#
# Copyright (c) 2011 Appcelerator, Inc. All Rights Reserved.
# Licensed under the Apache Public License (version 2)
import os, sys, re
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(this_dir, "..")))
from common import dict_has_non_empty_member
# We package the python markdown module already in /support/module/support/markdown.
module_support_dir = os.path.abspath(os.path.join(this_dir, "..", "..", "support", "module", "support"))
sys.path.append(module_support_dir)
import markdown
android_support_dir = os.path.abspath(os.path.join(this_dir, "..", "..", "support", "android"))
sys.path.append(android_support_dir)
from tilogger import *
log = TiLogger(None)
all_annotated_apis = None
apis = None
# Avoid obliterating our four spaces pattern with a careless %s:/ /^I/
FOUR_SPACES=' ' + ' '
# compiling REs ahead of time, since we use them heavily.
link_parts_re = re.compile(r"(?:\[([^\]]+?)\]\(([^\)\s]+?)\)|\<([^\>\s]+)\>)", re.MULTILINE)
find_links_re = re.compile(r"(\[[^\]]+?\]\([^\)\s]+?\)|\<[^\>\s]+\>)", re.MULTILINE)
html_scheme_re = re.compile(r"^http:|^https:")
doc_site_url_re = re.compile(r"http://docs.appcelerator.com/titanium/.*(#!.*)")
# we use this to distinguish inline HTML tags from Markdown links. Not foolproof;
# we should probably find a better technique in the long run.
html_element_re = re.compile("([a-z]|\/)")
try:
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
except:
print >> sys.stderr, "You don't have Pygments!\n"
print >> sys.stderr, "You can install it with:\n"
print >> sys.stderr, "> easy_install Pygments"
print ""
sys.exit(1)
# write unicode strings safely
def write_utf8(file, string):
file.write(string.encode('utf8', 'replace'))
def convert_string_to_jsduck_link(obj_specifier):
global all_annotated_apis
if obj_specifier in all_annotated_apis:
return obj_specifier
else:
# Maybe a method, property or event
parts = obj_specifier.split(".")
if len(parts) > 1:
parent = ".".join(parts[:-1])
member_name = parts[-1]
if parent in all_annotated_apis:
obj = all_annotated_apis[parent]
list_names = {
"methods": 'method-',
"properties": 'property-',
"events": 'event-'
}
for list_name in list_names.keys():
if hasattr(obj, list_name) and type(getattr(obj, list_name)) == list:
for m in getattr(obj, list_name):
if m.name == member_name:
return parent + '#' + list_names[list_name] + member_name
else:
return "#" + obj_specifier
return obj_specifier
def process_markdown_links(s):
new_string = s
results = find_links_re.findall(new_string)
if results is not None and len(results) > 0:
for link in results:
match = link_parts_re.match(link)
if match == None:
print "no match:" + link
continue
# Process links with a defined name [foo](url)
if match.group(1) != None and match.group(2)!= None:
url = match.group(2)
name = match.group(1)
# For simple markdown links, such as <Titanium.Analytics> or <www.google.com>
# skip links that look like HTML elements (<span>).
elif match.group(3) != None and not html_element_re.match(link, 1):
url = match.group(3)
name = None
# Otherwise, our "link" was probably an HTML tag, so we leave it alone
else:
continue
# Process URLs
docs_site_link = False
api_link = False
# For links back to the doc site -- guides pages, videos, etc.
# extract just the part following the hash, to avoid re-loading the site
# [Quick Start](http://docs.appcelerator.com/titanium/2.1/index.html#!/guide/Quick_Start) ->
# [Quick Start](#!/guide/Quick_Start Quick Start)
#
# Generic absolute URLs pass through unchanged
# [Facebook Graph API](http://developers.facebook.com/docs/reference/api/) -> unchanged
if url.startswith("http"):
url_match = doc_site_url_re.match(url)
if url_match:
url = url_match.group(1)
docs_site_link = True
if not name:
name = url
new_string = new_string.replace(link, "[%s](%s)" % (name, url))
else:
# Reformat API object links so jsduck can process them.
# [systemId](Titanium.XML.Entity.systemId -> {@link Titanium.XML.Entity#systemId systemId}
url = convert_string_to_jsduck_link(url)
if name:
new_string = new_string.replace(link, "{@link %s %s}" % (url, name))
else:
new_string = new_string.replace(link, "{@link %s}" % url)
return new_string
def markdown_to_html(s, obj=None):
if s is None or len(s) == 0:
return ""
if "<" in s or "[" in s:
s = process_markdown_links(s)
return markdown.markdown(s)
# remove <p> and </p> if a string is enclosed with them
def remove_p_tags(str):
if str is None or len(str) == 0:
return ""
if str.startswith("<p>"):
str = str[3:]
if str.endswith("</p>"):
str = str[:-4]
return str
# Print two digit version if third digit is 0.
def format_version(version_str):
digits = version_str.split(".")
if len(digits) <= 2:
return version_str
else:
if digits[2] == '0':
return ".".join(digits[0:2])
else:
return ".".join(digits)
def output_properties_for_obj(annotated_obj):
obj = annotated_obj.api_obj
res = []
# Only output platforms if platforms or since versions are different from
# containing object.
if obj.has_key("platforms") or obj.has_key("since"):
for platform in annotated_obj.platforms:
res.append("@platform %s %s" % (platform["name"], format_version(platform["since"])))
if obj.has_key("availability") and obj['availability'] == 'creation':
res.append("@creationOnly")
if obj.has_key("availability") and obj['availability'] == 'not-creation':
res.append("@nonCreation")
if obj.has_key("extends"):
res.append("@extends %s" % (obj["extends"]))
if(len(res) == 0):
return ""
return "\t * " + "\n\t * ".join(res) + "\n"
# @deprecated and @removed are multi-line tags, so this must be
# inserted after the summary and description, or the summary will get
# included as part of the deprecation.
def output_deprecation_for_obj(annotated_obj):
obj = annotated_obj.api_obj
if obj.has_key("deprecated"):
if obj["deprecated"].has_key("removed"):
str = "@removed %s" % (obj["deprecated"]["removed"])
else:
str = "@deprecated %s" % (obj["deprecated"]["since"])
if obj["deprecated"].has_key("notes"):
str += " %s" % markdown_to_html(obj["deprecated"]["notes"])
str = str.replace("\n", "\n\t * ")
return "\t * %s\n" % str
else:
return ""
def output_example(desc, code, convert_empty_code):
if len(desc) == 0 and len(code) == 0:
return None
# sometimes -- e.g. when there is only one example -- the code arrives in desc
if len(code) == 0 and convert_empty_code:
# no code? probably desc contains the code
code = desc
desc = []
# determine if we need to remove leading spaces from all code lines
need_strip = True
for line in code:
if len(line) > 0 and line[0:4] != FOUR_SPACES:
need_strip = False
break
if need_strip:
stripped_code = []
for line in code:
stripped_code.append(line[4:])
code = stripped_code
# hack - insert &shy; to avoid having a closing comment sign within JSDuck markup
code = "\n".join(code).replace("&", "&").replace("<", "<").replace(">", ">").replace("*/", "*­/")
desc = "\n".join(desc)
if len(desc) > 0 and len(code) > 0:
return "<p>%s</p><pre>%s</pre>" % (markdown_to_html(desc), code)
elif len(desc) == 0 and len(code) > 0:
return "<pre>%s</pre>" % (code)
elif len(desc) > 0 and len(code) == 0:
return "<p>%s</p>" % markdown_to_html(desc)
def output_examples_for_obj(obj):
res = []
if obj.has_key("examples"):
if len(obj['examples']) == 1:
res.append("<h3>Example</h3>")
else:
res.append("<h3>Examples</h3>")
for example in obj['examples']:
res.append("<h4>%s</h4>" % (example['title']))
body = example['example']
code = []
desc = []
desc_finished = False
prev_line_empty = False
first_code_block = True
for line in body.splitlines():
# parse description part until code starts
# skip empty string between desc and code
if not desc_finished:
if prev_line_empty == True and (line.find(FOUR_SPACES) == 0 or line.find('\t') == 0):
desc_finished = True
else:
# parsing code until code finishes or another description starts
if line.find(FOUR_SPACES) != 0 and line.find('\t') != 0 and len(line) != 0:
# code block finished - another description started - flush content
desc_finished = False
res.append(output_example(desc, code, first_code_block))
first_code_block = False
code = []
desc = []
if not desc_finished:
desc.append(line)
else:
code.append(line)
prev_line_empty = len(line.strip()) == 0
res.append(output_example(desc, code, first_code_block))
res = filter(None, res)
if(len(res) == 0):
return ""
return "\t * " + "\n\t * ".join(res) + "\n"
def transform_type(type):
if isinstance(type, list):
# type consist of more then one type
return "/".join(map((lambda typ: transform_type(typ)), type))
if type.startswith("Array<"):
type = re.sub(r'Array<(.*?)>', r'\1', type)
type = transform_type(type) + "[]"
elif type == "Dictionary":
type = "Dictionary"
elif type.startswith("Dictionary<"):
type = re.sub(r'Dictionary<(.*?)>', r'\1', type)
type = "Dictionary<%s>" % (type)
elif type == 'Callback':
type = "Function"
elif type.startswith("Callback<"):
type = re.sub(r'Callback<(.*?)>', r'\1', type)
type = "Callback<%s>" % (type)
return type
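# A few conversions implied by the rules above:
#   transform_type("Array<Titanium.UI.View>") -> "Titanium.UI.View[]"
#   transform_type("Callback<Object>") -> "Callback<Object>"
#   transform_type(["Number", "String"]) -> "Number/String"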
def has_ancestor(one_type, ancestor_name):
if one_type["name"] == ancestor_name:
return True
if "extends" in one_type and one_type["extends"] == ancestor_name:
return True
elif "extends" not in one_type:
if ancestor_name == 'Global':
# special case for "Global" types - they do not have @extends statement
return one_type["name"].find('Global') == 0
return False
else:
parent_type_name = one_type["extends"]
if (parent_type_name is None or not isinstance(parent_type_name, basestring) or
parent_type_name.lower() == "object"):
return False
if not parent_type_name in apis:
log.warn("%s extends %s but %s type information not found" % (one_type["name"],
parent_type_name, parent_type_name))
return False
return has_ancestor(apis[parent_type_name], ancestor_name)
def get_summary_and_description(api_obj):
summary = None
desc = None
if api_obj.has_key("summary"):
summary = markdown_to_html(api_obj["summary"])
if api_obj.has_key("description"):
desc = markdown_to_html(api_obj["description"])
res = u""
if summary != None:
res = u"\t * " + summary + "\n"
if desc != None:
res += u"\t * @description " + desc + "\n"
elif desc != None:
# use description if there is no summary
res = u"\t * " + desc
return res
# Side effect of hiding properties is that the accessors do not get hidden
# Explicitly hide accessors for JSDuck
def hide_accessors(parent_name, property_name):
res = ""
parent_obj = all_annotated_apis[parent_name].api_obj
if "properties" in parent_obj:
parent_properties = parent_obj["properties"]
property_dict = dict((p["name"], p) for p in parent_properties)
if property_name in property_dict:
setter = True
getter = True
if "accessors" in property_dict[property_name] and not property_dict[property_name]["accessors"]:
return res
if "availability" in property_dict[property_name] and property_dict[property_name]["availability"] == "creation":
setter = False
if "permission" in property_dict[property_name]:
if property_dict[property_name]["permission"] == "read-only":
setter = False
elif property_dict[property_name]["permission"] == "write-only":
getter = False
upperFirst = property_name[0].upper() + property_name[1:]
if getter:
getter = "get" + upperFirst
res += "/**\n\t * @method " + getter + " \n\t * @hide\n*/\n"
if setter:
setter = "set" + upperFirst
res += "/**\n\t * @method " + setter + " \n\t * @hide\n*/\n"
if "extends" in parent_obj:
parent_name = parent_obj["extends"]
return res + hide_accessors(parent_name, property_name)
else:
return res
def generate(raw_apis, annotated_apis, options):
global all_annotated_apis, apis
all_annotated_apis = annotated_apis
apis = raw_apis
if options is not None and (not hasattr(options, "output") or options.output is None or len(options.output) == 0):
log.error ("'output' option not provided")
if options is not None and not os.path.exists(options.output):
os.makedirs(options.output)
# Write the output files
if options is not None:
log.info("Creating titanium.js in %s" % options.output)
output = open(os.path.join(options.output, "titanium.js"), "w")
for name in annotated_apis:
annotated_obj = annotated_apis[name]
write_utf8(output, "/**\n\t * @class %s\n" % (annotated_obj.name))
if annotated_obj.typestr == "module" and annotated_obj.parent is None:
write_utf8(output, '\t * @typestr Module\n')
else:
typestr = ''
if annotated_obj.typestr == "module":
typestr = "Submodule"
elif annotated_obj.typestr == "proxy":
typestr = "Object"
elif annotated_obj.typestr == "method":
typestr = "Function"
elif annotated_obj.typestr == "property":
typestr = "Property"
elif annotated_obj.typestr == "event":
typestr = "Event"
elif annotated_obj.typestr == "parameter":
typestr = "Parameter"
if len(typestr) > 0 and annotated_obj.parent is not None:
write_utf8(output, '\t * @typestr %s of %s\n' % (typestr, annotated_obj.parent.name))
else:
write_utf8(output, '\t * @typestr %s\n' % (typestr))
if not (has_ancestor(raw_apis[name], "Titanium.Proxy") or has_ancestor(raw_apis[name], "Global")):
write_utf8(output, "\t * @pseudo\n")
write_utf8(output, output_properties_for_obj(annotated_obj))
write_utf8(output, get_summary_and_description(annotated_obj.api_obj))
write_utf8(output, output_examples_for_obj(annotated_obj.api_obj))
write_utf8(output, output_deprecation_for_obj(annotated_obj))
write_utf8(output, "\t */\n\n")
p = annotated_obj.properties
for k in p:
# Do not insert records for inherited members
if k.inherited_from:
continue
obj = k.api_obj
getter_ok = True
setter_ok = True
if k.permission == "read-only" or k.availability == "creation":
setter_ok = False
if k.permission == "write-only":
getter_ok = False
if "accessors" in obj and not obj["accessors"]:
getter_ok = setter_ok = False
if k.default is not None:
default_val = remove_p_tags(markdown_to_html(str(k.default)))
write_utf8(output, '/**\n\t * @property [%s=%s]\n' % (k.name, default_val))
else:
write_utf8(output, "/**\n\t * @property %s\n" % (k.name))
if obj.has_key('type'):
write_utf8(output, "\t * @type %s\n" % (transform_type(obj["type"])))
if obj.has_key('permission') and obj["permission"] == "read-only":
write_utf8(output, "\t * @readonly\n")
write_utf8(output, output_properties_for_obj(k))
write_utf8(output, get_summary_and_description(obj))
write_utf8(output, output_examples_for_obj(obj))
write_utf8(output, output_deprecation_for_obj(k))
write_utf8(output, " */\n\n")
p = annotated_obj.methods
for k in p:
# Do not insert records for inherited members
if k.inherited_from:
continue
obj = k.api_obj
write_utf8(output, "/**\n\t * @method %s\n" % (k.name))
write_utf8(output, get_summary_and_description(obj))
write_utf8(output, output_examples_for_obj(obj))
write_utf8(output, output_deprecation_for_obj(k))
if obj.has_key("parameters"):
for param in obj["parameters"]:
if "summary" in param:
summary = param["summary"]
if "repeatable" in param and param["repeatable"]:
repeatable = "..."
else:
repeatable = ""
type = "{" + transform_type(param["type"]) + repeatable + "}" if param.has_key("type") else ""
optional = "(optional)" if param.has_key('optional') and param["optional"] == True else ""
if param.has_key('default'):
default_val = remove_p_tags(markdown_to_html(str(param['default'])))
write_utf8(output, "\t * @param %s [%s=%s] %s\n\t * %s\n" % (type, param['name'], default_val, optional, markdown_to_html(summary)))
else:
write_utf8(output, "\t * @param %s %s %s\n\t * %s\n" % (type, param['name'], optional, markdown_to_html(summary)))
if obj.has_key("returns"):
returntypes = obj["returns"]
summary = ""
# check for the object form first
if "type" in returntypes:
type = "{" + transform_type(returntypes["type"]) + "}"
summary = returntypes["summary"] if "summary" in returntypes else ""
else:
# could be an array, check if it's iterable
if hasattr(returntypes, "__getitem__") or hasattr(returntypes, "__iter__"):
type = ""
for one_returntype in returntypes:
if type == "":
type = "{" + transform_type(one_returntype["type"])
else:
type = type + "/" + transform_type(one_returntype["type"])
# Can't handle multiple summaries, only take one.
if summary == "" and summary in one_returntype:
summary = one_returntype["summary"]
type = type + "}"
else:
log.warn("returns for %s should be an array or a dict." % obj["name"]);
write_utf8(output, "\t * @return %s %s\n" % (type, markdown_to_html(summary)))
else:
write_utf8(output, "\t * @return void\n")
write_utf8(output, output_properties_for_obj(k))
write_utf8(output, "\t*/\n\n")
p = annotated_obj.events
for k in p:
# Do not insert records for inherited members
if k.inherited_from:
continue
obj = k.api_obj
write_utf8(output, "/**\n\t * @event %s\n" % (k.name))
write_utf8(output, get_summary_and_description(obj))
write_utf8(output, output_examples_for_obj(obj))
if k.properties is not None:
for param in k.properties:
if "deprecated" in param.api_obj:
deprecated = "(deprecated)"
else:
deprecated = ""
platforms = "("+" ".join(param.api_obj['platforms'])+")" if param.api_obj.has_key('platforms') and param.api_obj["platforms"] else ""
if param.api_obj.has_key('type'):
write_utf8(output, "\t * @param {%s} %s %s %s\n" % (transform_type(param.api_obj['type']), deprecated, platforms, param.name))
else:
write_utf8(output, "\t * @param %s %s %s\n" % (deprecated, platforms, param.name))
write_utf8(output, get_summary_and_description(param.api_obj))
write_utf8(output, output_properties_for_obj(k))
write_utf8(output, "\t*/\n\n")
# handle excluded members
api_obj = annotated_obj.api_obj
if "excludes" in api_obj:
for member_type in [ "properties", "methods", "events" ]:
if member_type in api_obj["excludes"]:
annotation_string = { "properties":"@property", "methods":"@method",
"events":"@event" }[member_type]
excluded_members = api_obj["excludes"][member_type]
for one_member in excluded_members:
write_utf8(output, "/**\n\t * %s %s \n\t * @hide\n*/\n" % (annotation_string, one_member))
# Explicitly hide accessors
if member_type == "properties" and "extends" in api_obj:
parent_name = api_obj["extends"]
hide_methods = hide_accessors(parent_name, one_member)
if hide_methods:
write_utf8(output, "%s" % (hide_methods))
output.close()
| apache-2.0 | -4,725,235,165,777,018,000 | 34.598553 | 139 | 0.636645 | false |
peoplepower/botengine | com.ppc.Bot/domain.py | 1 | 2296 | '''
Created on May 25, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
# Organization short name, which allows us to send emails to this organization's administrators
ORGANIZATION_SHORT_NAME = "family"
# NOTE: Name of the service
SERVICE_NAME = "People Power Family"
# Name of the pack of products people can purchase
PACK_NAME = "Family Pack"
# URL to purchase the pack of products
PACK_PURCHASE_URL = ""
# Professional Monitoring Subscription Name
PROFESSIONAL_MONITORING_SUBSCRIPTION_NAME = "Avantguard"
# Professional Monitoring Callback Number
PROFESSIONAL_MONITORING_CALLBACK_NUMBER = "1-844-950-0582"
# Notification case-sensitive brand, which can be different from the organization short name. Use this to force a specific branded template.
# See https://presence.atlassian.net/wiki/spaces/CLOUD/pages/23385710/Branding+Configuration
ORGANIZATION_BRAND = ""
# Default language for this brand
DEFAULT_LANGUAGE = 'en'
# Default timezone for this brand
DEFAULT_TIMEZONE = 'US/Pacific'
# MixPanel token
MIXPANEL_TOKEN = None
# Amplitude tokens
AMPLITUDE_TOKENS = {
"app.presencepro.com": "",
"sboxall.presencepro.com": ""
}
# iOS download URL
APP_IOS_URL = ""
# Android download URL
APP_ANDROID_URL = ""
# Customer support scheduling URL
CUSTOMER_SUPPORT_URL = ""
# True to declare that the people who run this service are part of a "drug trial" and in the control group
DRUG_TESTING_CONTROL_GROUP = False
# True to allow security events to escalate to professional monitoring. False to keep it trusted circle monitored.
ALLOW_PROFESSIONAL_MONITORING_SECURITY = False
# True to allow the emergency call center to be contacted twice if the action plan calls for it, usually to dispatch.
ALLOW_SECONDARY_PROFESSIONAL_MONITORING = True
# Available services to tailor messaging to the users
CARE_SERVICES = True
ENERGY_SERVICES = True
SECURITY_SERVICES = True
# True if this brand can support a siren.
HAS_SIREN = True
# Automatic tagging for people who run this service.
ADD_TAGS = []
REMOVE_TAGS = []
# User facing modes. { "MODE": "User Facing Name" }
USER_FACING_MODES = {
"HOME": "OFF",
"AWAY": "AWAY",
"STAY": "STAY",
"TEST": "TEST"
}
| apache-2.0 | 8,950,259,843,876,554,000 | 26.662651 | 140 | 0.747822 | false |
thebongy/MakeMyOutputs | docx/styles/styles.py | 12 | 5625 | # encoding: utf-8
"""
Styles object, container for all objects in the styles part.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from warnings import warn
from . import BabelFish
from .latent import LatentStyles
from ..shared import ElementProxy
from .style import BaseStyle, StyleFactory
class Styles(ElementProxy):
"""
A collection providing access to the styles defined in a document.
Accessed using the :attr:`.Document.styles` property. Supports ``len()``,
iteration, and dictionary-style access by style name.
"""
__slots__ = ()
def __contains__(self, name):
"""
Enables `in` operator on style name.
"""
internal_name = BabelFish.ui2internal(name)
for style in self._element.style_lst:
if style.name_val == internal_name:
return True
return False
def __getitem__(self, key):
"""
Enables dictionary-style access by UI name. Lookup by style id is
deprecated, triggers a warning, and will be removed in a near-future
release.
"""
style_elm = self._element.get_by_name(BabelFish.ui2internal(key))
if style_elm is not None:
return StyleFactory(style_elm)
style_elm = self._element.get_by_id(key)
if style_elm is not None:
msg = (
'style lookup by style_id is deprecated. Use style name as '
'key instead.'
)
warn(msg, UserWarning)
return StyleFactory(style_elm)
raise KeyError("no style with name '%s'" % key)
def __iter__(self):
return (StyleFactory(style) for style in self._element.style_lst)
def __len__(self):
return len(self._element.style_lst)
def add_style(self, name, style_type, builtin=False):
"""
Return a newly added style object of *style_type* and identified
by *name*. A builtin style can be defined by passing True for the
optional *builtin* argument.
"""
style_name = BabelFish.ui2internal(name)
if style_name in self:
raise ValueError("document already contains style '%s'" % name)
style = self._element.add_style_of_type(
style_name, style_type, builtin
)
return StyleFactory(style)
def default(self, style_type):
"""
Return the default style for *style_type* or |None| if no default is
defined for that type (not common).
"""
style = self._element.default_for(style_type)
if style is None:
return None
return StyleFactory(style)
def get_by_id(self, style_id, style_type):
"""
Return the style of *style_type* matching *style_id*. Returns the
default for *style_type* if *style_id* is not found or is |None|, or
if the style having *style_id* is not of *style_type*.
"""
if style_id is None:
return self.default(style_type)
return self._get_by_id(style_id, style_type)
def get_style_id(self, style_or_name, style_type):
"""
Return the id of the style corresponding to *style_or_name*, or
|None| if *style_or_name* is |None|. If *style_or_name* is not
a style object, the style is looked up using *style_or_name* as
a style name, raising |ValueError| if no style with that name is
defined. Raises |ValueError| if the target style is not of
*style_type*.
"""
if style_or_name is None:
return None
elif isinstance(style_or_name, BaseStyle):
return self._get_style_id_from_style(style_or_name, style_type)
else:
return self._get_style_id_from_name(style_or_name, style_type)
@property
def latent_styles(self):
"""
A |LatentStyles| object providing access to the default behaviors for
latent styles and the collection of |_LatentStyle| objects that
define overrides of those defaults for a particular named latent
style.
"""
return LatentStyles(self._element.get_or_add_latentStyles())
def _get_by_id(self, style_id, style_type):
"""
Return the style of *style_type* matching *style_id*. Returns the
default for *style_type* if *style_id* is not found or if the style
having *style_id* is not of *style_type*.
"""
style = self._element.get_by_id(style_id)
if style is None or style.type != style_type:
return self.default(style_type)
return StyleFactory(style)
def _get_style_id_from_name(self, style_name, style_type):
"""
Return the id of the style of *style_type* corresponding to
*style_name*. Returns |None| if that style is the default style for
*style_type*. Raises |ValueError| if the named style is not found in
the document or does not match *style_type*.
"""
return self._get_style_id_from_style(self[style_name], style_type)
def _get_style_id_from_style(self, style, style_type):
"""
Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*.
"""
if style.type != style_type:
raise ValueError(
"assigned style is type %s, need type %s" %
(style.type, style_type)
)
if style == self.default(style_type):
return None
return style.style_id
| mit | 9,197,335,845,036,747,000 | 34.828025 | 77 | 0.602133 | false |
lubomir/django-rest-framework | setup.py | 47 | 2970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
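# For example (illustrative layout): if the package ships
# rest_framework/static/rest_framework/css/default.css, this returns
# {'rest_framework': ['static/rest_framework/css/default.css']}.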
version = get_version('rest_framework')
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('djangorestframework.egg-info')
sys.exit()
setup(
name='djangorestframework',
version=version,
url='http://www.django-rest-framework.org',
license='BSD',
description='Web APIs for Django, made easy.',
author='Tom Christie',
author_email='[email protected]', # SEE NOTE BELOW (*)
packages=get_packages('rest_framework'),
package_data=get_package_data('rest_framework'),
install_requires=[],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
]
)
# (*) Please direct queries to the discussion group, rather than to me directly
# Doing so helps ensure your question is helpful to other users.
# Queries directly to my email are likely to receive a canned response.
#
# Many thanks for your understanding.
| bsd-2-clause | -6,542,664,716,366,679,000 | 30.595745 | 79 | 0.621549 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/datasets/plot_iris_dataset.py | 1 | 2738 |
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# nodebox section
if __name__ == '__builtin__':
# we're running inside NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
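# NodeBox has no matplotlib backend, so pltshow() below renders the current
# figure to a temporary PNG and draws it onto the canvas with NodeBox's
# image() builtin, stacking successive plots vertically and growing the
# canvas as needed.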
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Set1,
edgecolor='k')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Set1, edgecolor='k', s=40)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
#plt.show()
pltshow(plt)
| mit | -4,049,645,647,576,385,000 | 25.833333 | 82 | 0.624406 | false |
le9i0nx/ansible | test/units/modules/network/nxos/test_nxos_config.py | 5 | 5079 | #!/usr/bin/env python
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_config
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosConfigModule(TestNxosModule):
module = nxos_config
def setUp(self):
super(TestNxosConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_config.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestNxosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_config', 'config.cfg')
self.load_config.return_value = None
def test_nxos_config_no_change(self):
args = dict(lines=['hostname localhost'])
set_module_args(args)
result = self.execute_module()
def test_nxos_config_src(self):
args = dict(src=load_fixture('nxos_config', 'candidate.cfg'))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01', 'interface Ethernet1',
'description test interface', 'no shutdown', 'ip routing']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_lines(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_before(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
before=['before command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['before command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('before command', result['commands'][0])
def test_nxos_config_after(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
after=['after command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['after command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('after command', result['commands'][-1])
def test_nxos_config_parents(self):
args = dict(lines=['ip address 1.2.3.4/5', 'no shutdown'], parents=['interface Ethernet10'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
self.assertEqual(config, result['commands'], result['commands'])
def test_nxos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_exact_requires_lines(self):
args = dict(match='exact')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_strict_requires_lines(self):
args = dict(match='strict')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_block_requires_lines(self):
args = dict(replace='block')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_config_requires_src(self):
args = dict(replace='config')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_backup_returns__backup__(self):
args = dict(backup=True)
set_module_args(args)
result = self.execute_module()
self.assertIn('__backup__', result)
| gpl-3.0 | 1,309,250,487,451,592,200 | 36.072993 | 100 | 0.66076 | false |
KarolBedkowski/photomagic | photomagick/filters/exposure.py | 1 | 1270 | #!usr/bin/python
# -*- coding: utf-8 -*-
__plugins__ = ('LowContrast', 'HiContrast', 'OverExposed', 'UnderExposed')
__version__ = '2011-03-20'
__author__ = 'Karol Będkowski'
__copyright__ = "Copyright (c) Karol Będkowski, 2011"
import ImageEnhance
from photomagick.common.base_filter import BaseFilter
from photomagick.common.const import CATEGORY_BASE
class LowContrast(BaseFilter):
NAME = _('Low Contrast')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Contrast...', image
image = ImageEnhance.Contrast(image).enhance(0.8)
yield 'Done', image
class HiContrast(BaseFilter):
NAME = _('Hi Contrast')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Contrast...', image
image = ImageEnhance.Contrast(image).enhance(1.4)
yield 'Done', image
class OverExposed(BaseFilter):
NAME = _('Over Exposed')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Brightness...', image
image = ImageEnhance.Brightness(image).enhance(1.4)
yield 'Done', image
class UnderExposed(BaseFilter):
NAME = _('Under Exposed')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Brightness...', image
image = ImageEnhance.Brightness(image).enhance(0.8)
yield 'Done', image
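# Illustrative driver (assumed host API): each filter's process() is a
# generator of (status_message, image) pairs, so a caller can report progress
# while the filter runs, e.g.:
#
#   for status, image in HiContrast().process(image):
#       print(status)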
| gpl-2.0 | -2,965,890,010,644,923,000 | 21.642857 | 74 | 0.698738 | false |
hasgeek/coaster | coaster/views/classview.py | 1 | 33503 | """
Class-based views
-----------------
Group related views into a class for easier management.
"""
from functools import update_wrapper, wraps
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urlsplit, urlunsplit
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.descriptor_props import SynonymProperty
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.orm.query import Query
# mypy can't find _request_ctx_stack in flask
from flask import ( # type: ignore[attr-defined]
Blueprint,
_request_ctx_stack,
abort,
has_request_context,
make_response,
redirect,
request,
)
from werkzeug.local import LocalProxy
from werkzeug.routing import parse_rule
from ..auth import add_auth_attribute, current_auth
from ..typing import SimpleDecorator
from ..utils import InspectableSet
__all__ = [
'rulejoin',
'current_view', # Functions
'ClassView',
'ModelView', # View base classes
'route',
'viewdata',
'url_change_check',
'requires_roles', # View decorators
'UrlChangeCheck',
'UrlForView',
'InstanceLoader', # Mixin classes
]
#: Type for URL rules in classviews
RouteRuleOptions = Dict[str, Any]
#: A proxy object that holds the currently executing :class:`ClassView` instance,
#: for use in templates as context. Exposed to templates by
#: :func:`coaster.app.init_app`. Note that the current view handler method within the
#: class is named :attr:`~current_view.current_handler`, so to examine it, use
#: :attr:`current_view.current_handler`.
current_view = LocalProxy(
lambda: getattr(_request_ctx_stack.top, 'current_view', None)
if has_request_context()
else None
)
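# In a Jinja2 template this makes constructs such as
# {{ current_view.current_handler.data.title }} possible, for example, once
# the proxy has been exposed by :func:`coaster.app.init_app` as noted above.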
# :func:`route` wraps :class:`ViewHandler` so that it can have an independent __doc__
def route(rule, **options):
"""
Decorator for defining routes on a :class:`ClassView` and its methods.
Accepts the same parameters that Flask's ``app.``:meth:`~flask.Flask.route`
accepts. See :class:`ClassView` for usage notes.
"""
return ViewHandler(rule, rule_options=options)
def viewdata(**kwargs):
"""
Decorator for adding additional data to a view method, to be used
alongside :func:`route`. This data is accessible as the ``data``
attribute on the view handler.
"""
return ViewHandler(None, viewdata=kwargs)
def rulejoin(class_rule, method_rule):
"""
Join class and method rules. Used internally by :class:`ClassView` to
combine rules from the :func:`route` decorators on the class and on the
individual view handler methods::
>>> rulejoin('/', '')
'/'
>>> rulejoin('/', 'first')
'/first'
>>> rulejoin('/first', '/second')
'/second'
>>> rulejoin('/first', 'second')
'/first/second'
>>> rulejoin('/first/', 'second')
'/first/second'
>>> rulejoin('/first/<second>', '')
'/first/<second>'
>>> rulejoin('/first/<second>', 'third')
'/first/<second>/third'
"""
if method_rule.startswith('/'):
return method_rule
return (
class_rule
+ ('' if class_rule.endswith('/') or not method_rule else '/')
+ method_rule
)
class ViewHandler:
"""
Internal object created by the :func:`route` and :func:`viewdata` functions.
"""
def __init__(
self,
rule,
rule_options=None,
viewdata=None, # skipcq: PYL-W0621
requires_roles=None, # skipcq: PYL-W0621
):
if rule is not None:
self.routes = [(rule, rule_options or {})]
else:
self.routes = []
self.data = viewdata or {}
self.requires_roles = requires_roles or {}
self.endpoints = set()
# Stubs for the decorator to fill
self.name = None
self.endpoint = None
self.func = None
def reroute(self, f):
# Use type(self) instead of ViewHandler so this works for (future) subclasses
# of ViewHandler
r = type(self)(None)
r.routes = self.routes
r.data = self.data
return r.__call__(f)
def copy_for_subclass(self):
# Like reroute, but just a copy
r = type(self)(None)
r.routes = self.routes
r.data = self.data
r.func = (
self.func
) # Copy func but not wrapped_func, as it will be re-wrapped by init_app
r.name = self.name
r.endpoint = self.endpoint
r.__doc__ = self.__doc__
r.endpoints = set()
return r
def __call__(self, decorated):
# Are we decorating a ClassView? If so, annotate the ClassView and return it
if type(decorated) is type and issubclass(decorated, ClassView):
if '__routes__' not in decorated.__dict__:
decorated.__routes__ = []
decorated.__routes__.extend(self.routes)
return decorated
# Are we decorating another ViewHandler? If so, copy routes and
# wrapped method from it.
if isinstance(decorated, (ViewHandler, ViewHandlerWrapper)):
self.routes.extend(decorated.routes)
newdata = dict(decorated.data)
newdata.update(self.data)
self.data = newdata
self.func = decorated.func
# If neither ClassView nor ViewHandler, assume it's a callable method
else:
self.func = decorated
self.name = self.func.__name__
# self.endpoint will change once init_app calls __set_name__
self.endpoint = self.name
self.__doc__ = self.func.__doc__ # skipcq: PYL-W0201
return self
# __set_name__ is called automatically on Python 3.6+, and manually by :meth:`ClassView.init_app`
def __set_name__(self, owner, name):
self.name = name
self.endpoint = owner.__name__ + '_' + self.name
def __get__(self, obj, cls=None):
return ViewHandlerWrapper(self, obj, cls)
def init_app(self, app, cls, callback=None):
"""
Register routes for a given app and :class:`ClassView` class. At the
time of this call, we will always be in the view class even if we were
originally defined in a base class. :meth:`ClassView.init_app`
ensures this. :meth:`init_app` therefore takes the liberty of adding
additional attributes to ``self``:
* :attr:`wrapped_func`: The function wrapped with all decorators added by the
class
* :attr:`view_func`: The view function registered as a Flask view handler
* :attr:`endpoints`: The URL endpoints registered to this view handler
"""
def view_func(**view_args):
# view_func does not make any reference to variables from init_app to avoid
# creating a closure. Instead, the code further below sticks all relevant
# variables into view_func's namespace.
# Instantiate the view class. We depend on its __init__ requiring no
# parameters
viewinst = view_func.view_class()
# Declare ourselves (the ViewHandler) as the current view. The wrapper makes
# equivalence tests possible, such as ``self.current_handler == self.index``
viewinst.current_handler = ViewHandlerWrapper(
view_func.view, viewinst, view_func.view_class
)
# Place view arguments in the instance, in case they are needed outside the
# dispatch process
viewinst.view_args = view_args
# Place the view instance on the request stack for :obj:`current_view` to
# discover
_request_ctx_stack.top.current_view = viewinst
# Call the view instance's dispatch method. View classes can customise this
# for desired behaviour.
return viewinst.dispatch_request(view_func.wrapped_func, view_args)
# Decorate the wrapped view function with the class's desired decorators.
# Mixin classes may provide their own decorators, and all of them will be
# applied. The oldest defined decorators (from mixins) will be applied first,
# and the class's own decorators last. Within the list of decorators, we reverse
# the list again, so that a list specified like this:
#
# __decorators__ = [first, second]
#
# Has the same effect as writing this:
#
# @first
# @second
# def myview(self):
# pass
wrapped_func = self.func
for base in reversed(cls.__mro__):
if '__decorators__' in base.__dict__:
for decorator in reversed(base.__dict__['__decorators__']):
wrapped_func = decorator(wrapped_func)
wrapped_func.__name__ = self.name # See below
# Make view_func resemble the underlying view handler method...
view_func = update_wrapper(view_func, wrapped_func)
# ...but give view_func the name of the method in the class (self.name),
# self.name will differ from __name__ only if the view handler method
# was defined outside the class and then added to the class with a
# different name.
view_func.__name__ = self.name
# Stick `wrapped_func` and `cls` into view_func to avoid creating a closure.
view_func.wrapped_func = wrapped_func
view_func.view_class = cls
view_func.view = self
# Keep a copy of these functions (we already have self.func)
self.wrapped_func = wrapped_func # skipcq: PYL-W0201
self.view_func = view_func # skipcq: PYL-W0201
for class_rule, class_options in cls.__routes__:
for method_rule, method_options in self.routes:
use_options = dict(method_options)
use_options.update(class_options)
endpoint = use_options.pop('endpoint', self.endpoint)
self.endpoints.add(endpoint)
use_rule = rulejoin(class_rule, method_rule)
app.add_url_rule(use_rule, endpoint, view_func, **use_options)
if callback:
callback(use_rule, endpoint, view_func, **use_options)
class ViewHandlerWrapper:
"""Wrapper for a view at runtime"""
def __init__(self, viewh, obj, cls=None):
# obj is the ClassView instance
self._viewh = viewh
self._obj = obj
self._cls = cls
def __call__(self, *args, **kwargs):
"""Treat this like a call to the method (and not to the view)"""
# As per the __decorators__ spec, we call .func, not .wrapped_func
return self._viewh.func(self._obj, *args, **kwargs)
def __getattr__(self, name):
return getattr(self._viewh, name)
def __eq__(self, other):
return (
isinstance(other, ViewHandlerWrapper)
and self._viewh == other._viewh
and self._obj == other._obj
and self._cls == other._cls
)
def __ne__(self, other): # pragma: no cover
return not self.__eq__(other)
def is_available(self):
"""Indicates whether this view is available in the current context"""
if hasattr(self._viewh.wrapped_func, 'is_available'):
return self._viewh.wrapped_func.is_available(self._obj)
return True
class ClassView:
"""
Base class for defining a collection of views that are related to each
other. Subclasses may define methods decorated with :func:`route`. When
:meth:`init_app` is called, these will be added as routes to the app.
Typical use::
@route('/')
class IndexView(ClassView):
@viewdata(title="Homepage")
@route('')
def index(self):
return render_template('index.html.jinja2')
@route('about')
@viewdata(title="About us")
def about(self):
return render_template('about.html.jinja2')
IndexView.init_app(app)
The :func:`route` decorator on the class specifies the base rule, which is
prefixed to the rule specified on each view method. This example produces
two view handlers, for ``/`` and ``/about``. Multiple :func:`route`
decorators may be used in both places.
The :func:`viewdata` decorator can be used to specify additional data, and
may appear either before or after the :func:`route` decorator, but only
adjacent to it. Data specified here is available as the :attr:`data`
attribute on the view handler, or at runtime in templates as
``current_view.current_handler.data``.
A rudimentary CRUD view collection can be assembled like this::
@route('/doc/<name>')
class DocumentView(ClassView):
@route('')
@render_with('mydocument.html.jinja2', json=True)
def view(self, name):
document = MyDocument.query.filter_by(name=name).first_or_404()
return document.current_access()
@route('edit', methods=['POST'])
@requestform('title', 'content')
def edit(self, name, title, content):
document = MyDocument.query.filter_by(name=name).first_or_404()
document.title = title
document.content = content
return 'edited!'
DocumentView.init_app(app)
See :class:`ModelView` for a better way to build views around a model.
"""
# If the class did not get a @route decorator, provide a fallback route
__routes__: List[Tuple[str, RouteRuleOptions]] = [('', {})]
#: Track all the views registered in this class
__views__ = ()
#: Subclasses may define decorators here. These will be applied to every
#: view handler in the class, but only when called as a view and not
#: as a Python method call.
__decorators__: List[SimpleDecorator] = []
#: Indicates whether :meth:`is_available` should simply return `True`
#: without conducting a test. Subclasses should not set this flag. It will
#: be set by :meth:`init_app` if any view handler is missing an
#: ``is_available`` method, as that implies the view is always available.
is_always_available = False
#: When a view is called, this will point to the current view handler,
#: an instance of :class:`ViewHandler`.
current_handler = None
#: When a view is called, this will be replaced with a dictionary of
#: arguments to the view.
view_args: Optional[dict] = None
def __eq__(self, other):
return type(other) is type(self)
def dispatch_request(self, view, view_args):
"""
View dispatcher that calls before_request, the view, and then after_request.
Subclasses may override this to provide a custom flow. :class:`ModelView`
does this to insert a model loading phase.
:param view: View method wrapped in specified decorators. The dispatcher
must call this.
:param dict view_args: View arguments, to be passed on to the view method
"""
# Call the :meth:`before_request` method
resp = self.before_request()
if resp:
return self.after_request(make_response(resp))
# Call the view handler method, then pass the response to :meth:`after_request`
return self.after_request(make_response(view(self, **view_args)))
def before_request(self):
"""
This method is called after the app's ``before_request`` handlers, and
before the class's view method. Subclasses and mixin classes may define
their own :meth:`before_request` to pre-process requests. This method
receives context via `self`, in particular via :attr:`current_handler`
and :attr:`view_args`.
"""
return None
def after_request(self, response):
"""
This method is called with the response from the view handler method.
It must return a valid response object. Subclasses and mixin classes
may override this to perform any necessary post-processing::
class MyView(ClassView):
...
def after_request(self, response):
response = super().after_request(response)
... # Process here
return response
:param response: Response from the view handler method
:return: Response object
"""
return response
def is_available(self):
"""
Returns `True` if *any* view handler in the class is currently
available via its `is_available` method.
"""
if self.is_always_available:
return True
for viewname in self.__views__:
if getattr(self, viewname).is_available():
return True
return False
@classmethod
def __get_raw_attr(cls, name):
for base in cls.__mro__:
if name in base.__dict__:
return base.__dict__[name]
raise AttributeError(name)
@classmethod
def add_route_for(cls, _name, rule, **options):
"""
Add a route for an existing method or view. Useful for modifying routes
that a subclass inherits from a base class::
class BaseView(ClassView):
def latent_view(self):
return 'latent-view'
@route('other')
def other_view(self):
return 'other-view'
@route('/path')
class SubView(BaseView):
pass
SubView.add_route_for('latent_view', 'latent')
SubView.add_route_for('other_view', 'another')
SubView.init_app(app)
# Created routes:
# /path/latent -> SubView.latent (added)
# /path/other -> SubView.other (inherited)
# /path/another -> SubView.other (added)
:param _name: Name of the method or view on the class
:param rule: URL rule to be added
:param options: Additional options for :meth:`~flask.Flask.add_url_rule`
"""
setattr(cls, _name, route(rule, **options)(cls.__get_raw_attr(_name)))
@classmethod
def init_app(cls, app, callback=None):
"""
Register views on an app. If :attr:`callback` is specified, it will
be called after ``app.``:meth:`~flask.Flask.add_url_rule`, with the same
parameters.
"""
processed = set()
cls.__views__ = set()
cls.is_always_available = False
for base in cls.__mro__:
for name, attr in base.__dict__.items():
if name in processed:
continue
processed.add(name)
if isinstance(attr, ViewHandler):
if base != cls: # Copy ViewHandler instances into subclasses
# TODO: Don't do this during init_app. Use a metaclass
# and do this when the class is defined.
attr = attr.copy_for_subclass()
setattr(cls, name, attr)
attr.__set_name__(cls, name) # Required for Python < 3.6
cls.__views__.add(name)
attr.init_app(app, cls, callback=callback)
if not hasattr(attr.wrapped_func, 'is_available'):
cls.is_always_available = True
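# A hedged example of init_app with a callback; `log_rule` is illustrative
# and receives the same arguments as app.add_url_rule():
#
#   def log_rule(rule, endpoint, view_func, **options):
#       print('registered', rule, '->', endpoint)
#
#   IndexView.init_app(app, callback=log_rule)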
class ModelView(ClassView):
"""
Base class for constructing views around a model. Functionality is provided
via mixin classes that must precede :class:`ModelView` in base class order.
Two mixins are provided: :class:`UrlForView` and :class:`InstanceLoader`.
Sample use::
@route('/doc/<document>')
class DocumentView(UrlForView, InstanceLoader, ModelView):
model = Document
route_model_map = {
'document': 'name'
}
@route('')
@render_with(json=True)
def view(self):
return self.obj.current_access()
Document.views.main = DocumentView
DocumentView.init_app(app)
Views will not receive view arguments, unlike in :class:`ClassView`. If
necessary, they are available as `self.view_args`.
"""
#: The model that this view class represents, to be specified by subclasses.
model: Optional[Any] = None
#: A base query to use if the model needs special handling.
query: Optional[Query] = None
#: A mapping of URL rule variables to attributes on the model. For example,
#: if the URL rule is ``/<parent>/<document>``, the attribute map can be::
#:
#: model = MyModel
#: route_model_map = {
#: 'document': 'name', # Map 'document' in URL to MyModel.name
#: 'parent': 'parent.name', # Map 'parent' to MyModel.parent.name
#: }
#:
#: The :class:`InstanceLoader` mixin class will convert this mapping into
#: SQLAlchemy attribute references to load the instance object.
route_model_map: Dict[str, str] = {}
def __init__(self, obj=None):
super().__init__()
self.obj = obj
def __eq__(self, other):
return type(other) is type(self) and other.obj == self.obj
def dispatch_request(self, view, view_args):
"""
View dispatcher that calls :meth:`before_request`, :meth:`loader`,
:meth:`after_loader`, the view, and then :meth:`after_request`.
:param view: View method wrapped in specified decorators.
:param dict view_args: View arguments, to be passed on to the view method
"""
# Call the :meth:`before_request` method
resp = self.before_request()
if resp:
return self.after_request(make_response(resp))
# Load the database model
self.obj = self.loader(**view_args)
# Trigger pre-view processing of the loaded object
resp = self.after_loader()
if resp:
return self.after_request(make_response(resp))
# Call the view handler method, then pass the response to :meth:`after_request`
return self.after_request(make_response(view(self)))
def loader(self, **view_args): # pragma: no cover
"""
Subclasses or mixin classes may override this method to provide a model
instance loader. The return value of this method will be placed at
``self.obj``.
:return: Object instance loaded from database
"""
raise NotImplementedError("View class is missing a loader method")
def after_loader(self):
# Determine permissions available on the object for the current actor,
# but only if the view method has a requires_permission decorator
if hasattr(self.current_handler.wrapped_func, 'requires_permission'):
if isinstance(self.obj, tuple):
perms = None
for subobj in self.obj:
if hasattr(subobj, 'permissions'):
perms = subobj.permissions(current_auth.actor, perms)
perms = InspectableSet(perms or set())
elif hasattr(self.obj, 'current_permissions'):
# current_permissions always returns an InspectableSet
perms = self.obj.current_permissions
else:
perms = InspectableSet()
add_auth_attribute('permissions', perms)
return None
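# Note: after_loader() above publishes the loaded object's permissions as
# current_auth.permissions, but only when the view handler carries a
# ``requires_permission`` attribute (set by a permission-requiring decorator
# elsewhere in this library; not shown in this file).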
def requires_roles(roles):
"""
Decorator for :class:`ModelView` views that limits access to the specified
roles.
"""
def inner(f):
def is_available_here(context):
return context.obj.roles_for(current_auth.actor).has_any(roles)
def is_available(context):
result = is_available_here(context)
if result and hasattr(f, 'is_available'):
# We passed, but we're wrapping another test, so ask there as well
return f.is_available(context)
return result
@wraps(f)
def wrapper(self, *args, **kwargs):
add_auth_attribute('login_required', True)
if not is_available_here(self):
abort(403)
return f(self, *args, **kwargs)
wrapper.requires_roles = roles
wrapper.is_available = is_available
return wrapper
return inner
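# Hedged usage sketch for requires_roles (model and role names are
# illustrative only):
#
#   @route('/doc/<document>')
#   class DocumentView(UrlForView, InstanceLoader, ModelView):
#       model = Document
#       route_model_map = {'document': 'name'}
#
#       @route('delete', methods=['POST'])
#       @requires_roles({'owner'})
#       def delete(self):
#           ...
#
# The decorated view aborts with 403 unless the current actor holds one of
# the named roles on ``self.obj``, and it also gains an ``is_available`` test.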
class UrlForView:
"""
Mixin class for :class:`ModelView` that registers view handler methods with
:class:`~coaster.sqlalchemy.mixins.UrlForMixin`'s
:meth:`~coaster.sqlalchemy.mixins.UrlForMixin.is_url_for`.
"""
@classmethod
def init_app(cls, app, callback=None):
def register_view_on_model(rule, endpoint, view_func, **options):
# Only pass in the attrs that are included in the rule.
# 1. Extract list of variables from the rule
rulevars = [v for c, a, v in parse_rule(rule)]
if options.get('host'):
rulevars.extend(v for c, a, v in parse_rule(options['host']))
if options.get('subdomain'):
rulevars.extend(v for c, a, v in parse_rule(options['subdomain']))
# Make a subset of cls.route_model_map with the required variables
params = {
v: cls.route_model_map[v] for v in rulevars if v in cls.route_model_map
}
# Register endpoint with the view function's name, endpoint name and
# parameters. Register the view for a specific app, unless we're in a
# Blueprint, in which case no single app is known and reg_app is None.
# FIXME: The behaviour of a Blueprint + multi-app combo is unknown and needs
# tests.
if isinstance(app, Blueprint):
prefix = app.name + '.'
reg_app = None
else:
prefix = ''
reg_app = app
cls.model.register_endpoint(
action=view_func.__name__,
endpoint=prefix + endpoint,
app=reg_app,
roles=getattr(view_func, 'requires_roles', None),
paramattrs=params,
)
cls.model.register_view_for(
app=reg_app,
action=view_func.__name__,
classview=cls,
attr=view_func.__name__,
)
if callback: # pragma: no cover
callback(rule, endpoint, view_func, **options)
super().init_app(app, callback=register_view_on_model)
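# Once registered, model instances can construct URLs to these views through
# UrlForMixin (a sketch; assumes the model subclasses UrlForMixin):
#
#   doc = Document.query.first()
#   doc.url_for('view')  # URL for DocumentView.view, e.g. '/doc/<doc.name>'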
def url_change_check(f):
"""
View method decorator that checks the URL of the loaded object in
``self.obj`` against the URL in the request (using
``self.obj.url_for(__name__)``). If the URLs do not match,
and the request is a ``GET``, it issues a redirect to the correct URL.
Usage::
@route('/doc/<document>')
class MyModelView(UrlForView, InstanceLoader, ModelView):
model = MyModel
route_model_map = {'document': 'url_id_name'}
@route('')
@url_change_check
@render_with(json=True)
def view(self):
return self.obj.current_access()
If the decorator is required for all view handlers in the class, use
:class:`UrlChangeCheck`.
This decorator will only consider the URLs to be different if:
* Schemes differ (``http`` vs ``https`` etc)
* Hostnames differ (apart from a case difference, as user agents use lowercase)
* Paths differ
The current URL's query will be copied to the redirect URL. The URL fragment
(``#target_id``) is not available to the server and will be lost.
"""
@wraps(f)
def wrapper(self, *args, **kwargs):
if request.method == 'GET' and self.obj is not None:
correct_url = self.obj.url_for(f.__name__, _external=True)
if correct_url != request.base_url:
# What's different? If it's a case difference in hostname, or different
# port number, username, password, query or fragment, ignore. For any
# other difference (scheme, hostname or path), do a redirect.
correct_url_parts = urlsplit(correct_url)
request_url_parts = urlsplit(request.base_url)
reconstructed_url = urlunsplit(
(
correct_url_parts.scheme,
correct_url_parts.hostname.lower(), # Replace netloc
correct_url_parts.path,
'', # Drop query
'', # Drop fragment
)
)
reconstructed_ref = urlunsplit(
(
request_url_parts.scheme,
request_url_parts.hostname.lower(), # Replace netloc
request_url_parts.path,
'', # Drop query
'', # Drop fragment
)
)
if reconstructed_url != reconstructed_ref:
if request.query_string:
correct_url = urlunsplit(
correct_url_parts._replace(
query=request.query_string.decode('utf-8')
)
)
return redirect(
correct_url
) # TODO: Decide if this should be 302 (default) or 301
return f(self, *args, **kwargs)
return wrapper
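# Illustration: if a document's canonical URL changes from /doc/old-name to
# /doc/new-name (say, after a rename), a GET request to the stale URL is
# redirected to the new one, with the request's query string carried over.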
class UrlChangeCheck(UrlForView):
"""
Mixin class for :class:`ModelView` and
:class:`~coaster.sqlalchemy.mixins.UrlForMixin` that applies the
:func:`url_change_check` decorator to all view handler methods. Subclasses
:class:`UrlForView`, which it depends on to register the view with the
model so that URLs can be generated. Usage::
@route('/doc/<document>')
class MyModelView(UrlChangeCheck, InstanceLoader, ModelView):
model = MyModel
route_model_map = {'document': 'url_id_name'}
@route('')
@render_with(json=True)
def view(self):
return self.obj.current_access()
"""
__decorators__ = [url_change_check]
class InstanceLoader:
"""
Mixin class for :class:`ModelView` that provides a :meth:`loader` that
attempts to load an instance of the model based on attributes in the
:attr:`~ModelView.route_model_map` dictionary.
:class:`InstanceLoader` will traverse relationships (many-to-one or
one-to-one) and perform a SQL ``JOIN`` with the target class.
"""
def loader(self, **view_args):
if any(name in self.route_model_map for name in view_args):
# We have a URL route attribute that matches one of the model's attributes.
# Attempt to load the model instance
filters = {
self.route_model_map[key]: value
for key, value in view_args.items()
if key in self.route_model_map
}
query = self.query or self.model.query
joined_models = set()
for name, value in filters.items():
if '.' in name:
# Did we get something like `parent.name`?
# Dig into it to find the source column
source = self.model
for subname in name.split('.'):
attr = relattr = getattr(source, subname)
# Did we get to something like 'parent'?
# 1. If it's a synonym, get the attribute it is a synonym for
# 2. If it's a relationship, find the source class, join it to
# the query, and then continue looking for attributes over there
if hasattr(attr, 'original_property') and isinstance(
attr.original_property, SynonymProperty
):
attr = getattr(source, attr.original_property.name)
if isinstance(attr, InstrumentedAttribute) and isinstance(
attr.property, RelationshipProperty
):
if isinstance(attr.property.argument, Mapper):
attr = (
attr.property.argument.class_
) # Unlikely to be used. pragma: no cover
else:
attr = attr.property.argument
if attr not in joined_models:
# SQL JOIN the other model on the basis of
# the relationship that led us to this join
query = query.join(attr, relattr)
# But ensure we don't JOIN twice
joined_models.add(attr)
source = attr
query = query.filter(source == value)
else:
query = query.filter(getattr(self.model, name) == value)
obj = query.one_or_404()
return obj
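# Illustrative route_model_map for the traversal above (hypothetical models):
#
#   route_model_map = {
#       'document': 'name',       # filter on MyDocument.name
#       'parent': 'parent.name',  # JOIN via MyDocument.parent, filter on name
#   }
#
# For 'parent.name', loader() joins the related model once and then filters
# on the parent's name column.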
| bsd-2-clause | 7,673,760,242,782,383,000 | 37.866589 | 88 | 0.57768 | false |
CalHoll/SoundMoose | server/project/conf/base.py | 3 | 5770 | import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(
os.path.join(BASE_DIR, 'apps')
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'rest_framework',
'rest_framework_swagger',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'core.middleware.corsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.normpath(os.path.join(BASE_DIR, 'templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'soundmoose',
'USER': 'postgres',
'PASSWORD': 'hrr20soundmoose',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = os.path.join(ROOT_DIR, 'assets')
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.normpath(os.path.join(ROOT_DIR, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'file': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filters': ['require_debug_false'],
'filename': 'log/error.log',
'formatter': 'verbose'
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
},
'django.request': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
}
}
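# Sketch of how application code would feed the handlers above (logger names
# must match the 'loggers' keys; 'django.request' errors reach log/error.log
# only when DEBUG is False):
#
#   import logging
#   logging.getLogger('django.request').error('payment callback failed')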
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}
TEMPLATE_LOADERS = [
'django.template.loaders.eggs.Loader',
]
# CORS Middleware https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = (
'soundmoose.com',
'www.soundmoose.com',
'localhost:3000',
)
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
| mit | 4,282,894,446,418,767,400 | 24.990991 | 93 | 0.620104 | false |
Y3K/django | django/middleware/locale.py | 358 | 2983 | "This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.functional import cached_property
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
language = translation.get_language_from_request(
request, check_path=self.is_language_prefix_patterns_used)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = (
not path_valid and (
settings.APPEND_SLASH and not language_path.endswith('/')
and is_valid_path('%s/' % language_path, urlconf)
)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (self.is_language_prefix_patterns_used
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
@cached_property
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
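# Illustration (assumes i18n_patterns() wraps the root urlpatterns, which is
# what makes is_language_prefix_patterns_used true): a 404 for /about/ on a
# request negotiated to 'de' is redirected by process_response to /de/about/.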
| bsd-3-clause | 8,892,617,960,260,965,000 | 41.014085 | 98 | 0.634596 | false |
DoubleNegativeVisualEffects/cortex | test/IECoreRI/DoubleSided.py | 7 | 2640 | ##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreRI
import os.path
import os
class DoubleSidedTest( IECoreRI.TestCase ) :
def test( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testDoubleSided.rib" )
self.assertEqual( r.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
r.setAttribute( "doubleSided", IECore.BoolData( False ) )
self.assertEqual( r.getAttribute( "doubleSided" ), IECore.BoolData( False ) )
del r
l = "".join( file( "test/IECoreRI/output/testDoubleSided.rib" ).readlines() )
self.assert_( "Sides 1" in l )
r = IECoreRI.Renderer( "test/IECoreRI/output/testDoubleSided.rib" )
r.setAttribute( "doubleSided", IECore.BoolData( True ) )
del r
l = "".join( file( "test/IECoreRI/output/testDoubleSided.rib" ).readlines() )
self.assert_( "Sides 2" in l )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 1,475,070,116,252,203,000 | 40.904762 | 79 | 0.692424 | false |
vishesh92/redash | old_migrations/0011_migrate_bigquery_to_json.py | 20 | 1391 | from base64 import b64encode
import json
from redash.models import DataSource
def convert_p12_to_pem(p12file):
from OpenSSL import crypto
with open(p12file, 'rb') as f:
p12 = crypto.load_pkcs12(f.read(), "notasecret")
return crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())
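# The .p12 bundle is the legacy Google service-account key format; the fixed
# password "notasecret" is the default Google issues such files with. The PEM
# private key extracted here is embedded in the JSON-style config below.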
if __name__ == '__main__':
for ds in DataSource.select(DataSource.id, DataSource.type, DataSource.options):
if ds.type == 'bigquery':
options = json.loads(ds.options)
if 'jsonKeyFile' in options:
continue
new_options = {
'projectId': options['projectId'],
'jsonKeyFile': b64encode(json.dumps({
'client_email': options['serviceAccount'],
'private_key': convert_p12_to_pem(options['privateKey'])
}))
}
ds.options = json.dumps(new_options)
ds.save(only=ds.dirty_fields)
elif ds.type == 'google_spreadsheets':
options = json.loads(ds.options)
if 'jsonKeyFile' in options:
continue
with open(options['credentialsFilePath']) as f:
new_options = {
'jsonKeyFile': b64encode(f.read())
}
ds.options = json.dumps(new_options)
ds.save(only=ds.dirty_fields)
| bsd-2-clause | -8,830,291,864,657,888,000 | 30.613636 | 84 | 0.553559 | false |
lache/RacingKingLee | monitor/engine.win64/2.74/python/lib/site-packages/numpy/f2py/auxfuncs.py | 75 | 19979 | #!/usr/bin/env python
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
f2py_version = __version__.version
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
options={}
debugoptions=[]
wrapfuncs = 1
def outmess(t):
if options.get('verbose', 1):
sys.stdout.write(t)
def debugcapi(var):
return 'capi' in debugoptions
def _isstring(var):
return 'typespec' in var and var['typespec']=='character' and (not isexternal(var))
def isstring(var):
return _isstring(var) and not isarray(var)
def ischaracter(var):
return isstring(var) and 'charselector' not in var
def isstringarray(var):
return isarray(var) and _isstring(var)
def isarrayofstrings(var):
# leaving out '*' for now so that
# `character*(*) a(m)` and `character a(m,*)`
# are treated differently. Luckily `character**` is illegal.
return isstringarray(var) and var['dimension'][-1]=='(*)'
def isarray(var):
return 'dimension' in var and (not isexternal(var))
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
return isscalar(var) and var.get('typespec') in ['complex', 'double complex']
def islogical(var):
return isscalar(var) and var.get('typespec')=='logical'
def isinteger(var):
return isscalar(var) and var.get('typespec')=='integer'
def isreal(var):
return isscalar(var) and var.get('typespec')=='real'
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
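# Illustration: a Fortran declaration `real*8 x` is parsed into a variable
# dict like {'typespec': 'real', 'kindselector': {'*': '8'}}, so get_kind()
# returns '8' and isdouble() below holds for it.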
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer', 'logical']:
return 0
return get_kind(var)=='8'
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-1'
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-2'
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-4'
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-8'
def isdouble(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='8'
def islong_double(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='16'
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var)=='32'
def iscomplexarray(var):
return isarray(var) and var.get('typespec') in ['complex', 'double complex']
def isint1array(var):
return isarray(var) and var.get('typespec')=='integer' \
and get_kind(var)=='1'
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-1'
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-2'
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-4'
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-8'
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='1'
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='2'
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='4'
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='8'
def isallocatable(var):
return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
return not (not 'dimension' in var or isstring(var))
def ismoduleroutine(rout):
return 'modulename' in rout
def ismodule(rout):
return ('block' in rout and 'module'==rout['block'])
def isfunction(rout):
return ('block' in rout and 'function'==rout['block'])
#def isfunction_wrap(rout):
# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout))
def isfunction_wrap(rout):
if isintent_c(rout):
return 0
return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
return ('block' in rout and 'subroutine'==rout['block'])
def issubroutine_wrap(rout):
if isintent_c(rout):
return 0
return issubroutine(rout) and hasassumedshape(rout)
def hasassumedshape(rout):
if rout.get('hasassumedshape'):
return True
for a in rout['args']:
for d in rout['vars'].get(a, {}).get('dimension', []):
if d==':':
rout['hasassumedshape'] = True
return True
return False
def isroutine(rout):
return isfunction(rout) or issubroutine(rout)
def islogicalfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islogical(rout['vars'][a])
return 0
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
Run the following test before using it in your applications:
$(f2py install dir)/test-site/{b/runme_scalar,e/runme}
When using GNU gcc/g77 compilers, codes should work correctly.
**************************************************************\n""")
return 1
return 0
def isstringfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return isstring(rout['vars'][a])
return 0
def hasexternals(rout):
return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
return 'vars' in rout and rout['vars']
def isoptional(var):
return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
return ('attrspec' in var and 'external' in var['attrspec'])
def isrequired(var):
return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
if 'intent' not in var:
return 1
if 'hide' in var['intent']:
return 0
if 'inplace' in var['intent']:
return 0
if 'in' in var['intent']:
return 1
if 'out' in var['intent']:
return 0
if 'inout' in var['intent']:
return 0
if 'outin' in var['intent']:
return 0
return 1
def isintent_inout(var):
return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent']
def isintent_out(var):
return 'out' in var.get('intent', [])
def isintent_hide(var):
return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout, isintent_inplace)(var)))))
def isintent_nothide(var):
return not isintent_hide(var)
def isintent_c(var):
return 'c' in var.get('intent', [])
# def isintent_f(var):
# return not isintent_c(var)
def isintent_cache(var):
return 'cache' in var.get('intent', [])
def isintent_copy(var):
return 'copy' in var.get('intent', [])
def isintent_overwrite(var):
return 'overwrite' in var.get('intent', [])
def isintent_callback(var):
return 'callback' in var.get('intent', [])
def isintent_inplace(var):
return 'inplace' in var.get('intent', [])
def isintent_aux(var):
return 'aux' in var.get('intent', [])
def isintent_aligned4(var):
return 'aligned4' in var.get('intent', [])
def isintent_aligned8(var):
return 'aligned8' in var.get('intent', [])
def isintent_aligned16(var):
return 'aligned16' in var.get('intent', [])
isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT',
isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE',
isintent_cache: 'INTENT_CACHE',
isintent_c: 'INTENT_C', isoptional: 'OPTIONAL',
isintent_inplace: 'INTENT_INPLACE',
isintent_aligned4: 'INTENT_ALIGNED4',
isintent_aligned8: 'INTENT_ALIGNED8',
isintent_aligned16: 'INTENT_ALIGNED16',
}
def isprivate(var):
return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"', "'"]
def hasnote(var):
return 'note' in var
def hasresultnote(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return hasnote(rout['vars'][a])
return 0
def hascommon(rout):
return 'common' in rout
def containscommon(rout):
if hascommon(rout):
return 1
if hasbody(rout):
for b in rout['body']:
if containscommon(b):
return 1
return 0
def containsmodule(block):
if ismodule(block):
return 1
if not hasbody(block):
return 0
for b in block['body']:
if containsmodule(b):
return 1
return 0
def hasbody(rout):
return 'body' in rout
def hascallstatement(rout):
return getcallstatement(rout) is not None
def istrue(var):
return 1
def isfalse(var):
return 0
class F2PYError(Exception):
pass
class throw_error:
def __init__(self, mess):
self.mess = mess
def __call__(self, var):
mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess)
raise F2PYError(mess)
def l_and(*f):
l, l2='lambda v', []
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l, i, i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l, ' and '.join(l2)))
def l_or(*f):
l, l2='lambda v', []
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l, i, i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l, ' or '.join(l2)))
def l_not(f):
return eval('lambda v,f=f:not f(v)')
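# These eval-built combinators compose the predicates in this module into new
# predicates over a variable dict, e.g.:
#
#   l_and(isintent_in, isscalar)(var)  # scalar intent(in) argument?
#   l_or(isstring, isarray)(var)
#   l_not(isoptional)(var)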
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname']==''
except KeyError:
return 0
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if name=='':
raise KeyError
if not name:
errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements']))
raise KeyError
except KeyError:
name = rout['name']
return name
def getmultilineblock(rout,blockname,comment=1,counter=0):
try:
r = rout['f2pyenhancements'].get(blockname)
except KeyError:
return
if not r: return
if counter > 0 and isinstance(r, str):
return
if isinstance(r, list):
if counter>=len(r): return
r = r[counter]
if r[:3]=="'''":
if comment:
r = '\t/* start ' + blockname + ' multiline ('+repr(counter)+') */\n' + r[3:]
else:
r = r[3:]
if r[-3:]=="'''":
if comment:
r = r[:-3] + '\n\t/* end multiline ('+repr(counter)+')*/'
else:
r = r[:-3]
else:
errmess("%s multiline block should end with `'''`: %s\n" \
% (blockname, repr(r)))
return r
def getcallstatement(rout):
return getmultilineblock(rout, 'callstatement')
def getcallprotoargument(rout,cb_map={}):
r = getmultilineblock(rout, 'callprotoargument', comment=0)
if r: return r
if hascallstatement(rout):
outmess('warning: callstatement is defined without callprotoargument\n')
return
from .capi_maps import getctype
arg_types, arg_types2 = [], []
if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
arg_types.extend(['char*', 'size_t'])
for n in rout['args']:
var = rout['vars'][n]
if isintent_callback(var):
continue
if n in cb_map:
ctype = cb_map[n]+'_typedef'
else:
ctype = getctype(var)
if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
pass
elif isstring(var):
pass
#ctype = 'void*'
else:
ctype = ctype+'*'
if isstring(var) or isarrayofstrings(var):
arg_types2.append('size_t')
arg_types.append(ctype)
proto_args = ','.join(arg_types+arg_types2)
if not proto_args:
proto_args = 'void'
#print proto_args
return proto_args
def getusercode(rout):
return getmultilineblock(rout, 'usercode')
def getusercode1(rout):
return getmultilineblock(rout, 'usercode', counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout, 'pymethoddef')
def getargs(rout):
sortargs, args=[], []
if 'args' in rout:
args=rout['args']
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=rout['args']
return args, sortargs
def getargs2(rout):
sortargs, args=[], rout.get('args', [])
auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\
and a not in args]
args = auxvars + args
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=auxvars + rout['args']
return args, sortargs
def getrestdoc(rout):
if 'f2pymultilines' not in rout:
return None
k = None
if rout['block']=='python module':
k = rout['block'], rout['name']
return rout['f2pymultilines'].get(k, None)
def gentitle(name):
l=(80-len(name)-6)//2
return '/*%s %s %s*/'%(l*'*', name, l*'*')
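# e.g. gentitle('auxfuncs') pads the name with '*' on both sides into a
# roughly 80-column C comment banner for the generated source.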
def flatlist(l):
if isinstance(l, list):
return reduce(lambda x,y,f=flatlist:x+f(y), l, [])
return [l]
def stripcomma(s):
if s and s[-1]==',': return s[:-1]
return s
def replace(str,d,defaultsep=''):
if isinstance(d, list):
return [replace(str, _m, defaultsep) for _m in d]
if isinstance(str, list):
return [replace(_m, d, defaultsep) for _m in str]
for k in 2*list(d.keys()):
if k=='separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep=d['separatorsfor'][k]
else:
sep=defaultsep
if isinstance(d[k], list):
str=str.replace('#%s#'%(k), sep.join(flatlist(d[k])))
else:
str=str.replace('#%s#'%(k), d[k])
return str
def dictappend(rd, ar):
if isinstance(ar, list):
for a in ar:
rd=dictappend(rd, a)
return rd
for k in ar.keys():
if k[0]=='_':
continue
if k in rd:
if isinstance(rd[k], str):
rd[k]=[rd[k]]
if isinstance(rd[k], list):
if isinstance(ar[k], list):
rd[k]=rd[k]+ar[k]
else:
rd[k].append(ar[k])
elif isinstance(rd[k], dict):
if isinstance(ar[k], dict):
if k=='separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
rd[k][k1]=ar[k][k1]
else:
rd[k]=dictappend(rd[k], ar[k])
else:
rd[k]=ar[k]
return rd
def applyrules(rules,d,var={}):
ret={}
if isinstance(rules, list):
for r in rules:
rr=applyrules(r, d, var)
ret=dictappend(ret, rr)
if '_break' in rr:
break
return ret
if '_check' in rules and (not rules['_check'](var)):
return ret
if 'need' in rules:
res = applyrules({'needs':rules['need']}, d, var)
if 'needs' in res:
cfuncs.append_needs(res['needs'])
for k in rules.keys():
if k=='separatorsfor':
ret[k]=rules[k]; continue
if isinstance(rules[k], str):
ret[k]=replace(rules[k], d)
elif isinstance(rules[k], list):
ret[k]=[]
for i in rules[k]:
ar=applyrules({k:i}, d, var)
if k in ar:
ret[k].append(ar[k])
elif k[0]=='_':
continue
elif isinstance(rules[k], dict):
ret[k]=[]
for k1 in rules[k].keys():
if isinstance(k1, types.FunctionType) and k1(var):
if isinstance(rules[k][k1], list):
for i in rules[k][k1]:
if isinstance(i, dict):
res=applyrules({'supertext':i}, d, var)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i, d))
else:
i=rules[k][k1]
if isinstance(i, dict):
res=applyrules({'supertext':i}, d)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i, d))
else:
errmess('applyrules: ignoring rule %s.\n'%repr(rules[k]))
if isinstance(ret[k], list):
if len(ret[k])==1:
ret[k]=ret[k][0]
if ret[k]==[]:
del ret[k]
return ret
| mit | 7,611,495,620,189,862,000 | 27.099859 | 182 | 0.560539 | false |
dfdx2/django | tests/many_to_one/tests.py | 12 | 30596 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils.translation import gettext_lazy
from .models import (
Article, Category, Child, City, District, First, Parent, Record, Relation,
Reporter, School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]')
self.r2.save()
# Create an Article.
self.a = Article(headline="This is a test", pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>"]
)
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with self.assertRaisesMessage(TypeError, "'Article' instance expected, got <Reporter:"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"prohibited. Use article_set.set() instead."
)
with self.assertRaisesMessage(TypeError, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'), ["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id), ["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id), ["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'), ["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Implied __exact also works
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
["<Article: John's second story>", "<Article: This is a test>"]
)
# ... and should work fine with the string that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
john_smith = ["<Reporter: John Smith>"]
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'), john_smith)
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(Reporter.objects.filter(article__headline__startswith='T').distinct(), john_smith)
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
john_smith
)
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter__exact=self.r).distinct(), john_smith)
# Implied __exact also works.
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith)
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
qs = Article.objects.filter(
reporter=self.r,
).distinct().order_by().values('reporter__first_name', 'reporter__last_name')
self.assertEqual([d], list(qs))
def test_select_related(self):
# Article.objects.select_related().dates() works properly when there
# are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'day')),
[datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'month')),
[datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'year')),
[datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)]
)
def test_delete(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
Article.objects.create(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(
Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>", "<Reporter: Paul Jones>"]
)
self.r2.delete()
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id,
)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
["<Article: John's second test>", "<Article: This is a test>"]
)
# Create an Article by Paul for the same date.
a3 = Article.objects.create(
headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id,
)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
with self.assertRaises(MultipleObjectsReturned):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(
repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)))
)
def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_gettext_lazy(self):
reporter = Reporter.objects.create(first_name='John', last_name='Smith', email='[email protected]')
lazy = gettext_lazy('test')
reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
notlazy = str(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list('reporter__notafield')
article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
        # Accessing the related object again returns exactly the same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None will not fail: Child.parent is null=False.
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
with self.assertRaises(ValueError):
setattr(c, "parent", First(id=1, second=1))
# You can assign None to Child.parent during object creation.
Child(name='xyzzy', parent=None)
# But when trying to save a Child with parent=None, the database will
# raise IntegrityError.
with self.assertRaises(IntegrityError), transaction.atomic():
Child.objects.create(name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name='Chicago')
District.objects.create(city=ch, name='Far South')
District.objects.create(city=ch, name='North')
ny = City.objects.create(name='New York', id=2 ** 33)
District.objects.create(city=ny, name='Brooklyn')
District.objects.create(city=ny, name='Manhattan')
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
with self.assertRaises(ValueError):
Child.objects.create(name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual('id', cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
# The <field>_set manager does not join on Null value fields (#17541)
Third.objects.create(name='Third 1')
Third.objects.create(name='Third 2')
th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we won't
        # even execute a query in this case.
with self.assertNumQueries(0):
self.assertEqual(th.child_set.count(), 0)
th.save()
        # Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(School.objects.all(), ["<School: School object>"])
self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
School._meta.base_manager_name = 'objects'
School._meta._expire_cache()
try:
private_student = Student.objects.get(pk=private_student.pk)
with self.assertRaises(School.DoesNotExist):
private_student.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of
        # `AttributeError` (refs #21563).
self.assertFalse(hasattr(Article(), 'reporter'))
def test_clear_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>'])
city.districts.clear()
self.assertQuerysetEqual(city.districts.all(), [])
def test_remove_after_prefetch(self):
c = City.objects.create(name='Musical City')
d = District.objects.create(name='Ladida', city=c)
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>'])
city.districts.remove(d)
self.assertQuerysetEqual(city.districts.all(), [])
def test_add_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
d2 = District.objects.create(name='Ladidu')
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
def test_set_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
d2 = District.objects.create(name='Ladidu')
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.set([d2])
self.assertQuerysetEqual(city.districts.all(), ['<District: Ladidu>'])
def test_add_then_remove_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
d2 = District.objects.create(name='Ladidu')
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
city.districts.remove(d2)
self.assertEqual(city.districts.count(), 1)
| bsd-3-clause | -1,458,729,472,314,240,300 | 45.92638 | 119 | 0.62348 | false |
DrabWeb/iTerm2 | tests/esctest/tests/el.py | 31 | 2319 | from esc import NUL, blank
import escargs
import esccmd
import escio
from esctypes import Point, Rect
from escutil import AssertEQ, AssertScreenCharsInRectEqual, GetCursorPosition, knownBug
class ELTests(object):
def prepare(self):
"""Initializes the screen to abcdefghij on the first line with the cursor
on the 'e'."""
esccmd.CUP(Point(1, 1))
escio.Write("abcdefghij")
esccmd.CUP(Point(5, 1))
def test_EL_Default(self):
"""Should erase to right of cursor."""
self.prepare()
esccmd.EL()
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ "abcd" + 6 * NUL ])
def test_EL_0(self):
"""Should erase to right of cursor."""
self.prepare()
esccmd.EL(0)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ "abcd" + 6 * NUL ])
def test_EL_1(self):
"""Should erase to left of cursor."""
self.prepare()
esccmd.EL(1)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ 5 * blank() + "fghij" ])
def test_EL_2(self):
"""Should erase whole line."""
self.prepare()
esccmd.EL(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ 10 * NUL ])
def test_EL_IgnoresScrollRegion(self):
"""Should erase whole line."""
self.prepare()
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(2, 4)
esccmd.CUP(Point(5, 1))
esccmd.EL(2)
esccmd.DECRESET(esccmd.DECLRMM)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ 10 * NUL ])
def test_EL_doesNotRespectDECProtection(self):
"""EL respects DECSCA."""
escio.Write("a")
escio.Write("b")
esccmd.DECSCA(1)
escio.Write("c")
esccmd.DECSCA(0)
esccmd.CUP(Point(1, 1))
esccmd.EL(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1),
[ NUL * 3 ])
@knownBug(terminal="iTerm2",
reason="Protection not implemented.")
def test_EL_respectsISOProtection(self):
"""EL respects SPA/EPA."""
escio.Write("a")
escio.Write("b")
esccmd.SPA()
escio.Write("c")
esccmd.EPA()
esccmd.CUP(Point(1, 1))
esccmd.EL(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1),
[ blank() * 2 + "c" ])
| gpl-2.0 | 3,243,512,608,684,006,000 | 27.9875 | 87 | 0.572229 | false |
dougwig/x-neutron-lbaas | neutron_lbaas/openstack/common/service.py | 2 | 15276 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
    # Importing just the symbol here because io.UnsupportedOperation does
    # not exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from neutron_lbaas.openstack.common import eventlet_backdoor
from neutron_lbaas.openstack.common._i18n import _LE, _LI, _LW
from neutron_lbaas.openstack.common import log as logging
from neutron_lbaas.openstack.common import systemd
from neutron_lbaas.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {}
self.sigcaught = None
self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't keep forking off children
            # that die instantly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
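# Illustrative usage (hedged: ``MyService`` and ``do_work`` are hypothetical,
# not part of this module). launch() picks ServiceLauncher for a single
# worker and ProcessLauncher when forking several:
#
#     class MyService(Service):
#         def start(self):
#             self.tg.add_thread(do_work)
#
#     launcher = launch(MyService(), workers=4)
#     launcher.wait()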
| apache-2.0 | 6,979,269,728,550,008,000 | 29.309524 | 79 | 0.59564 | false |
ioram7/keystone-federado-pgid2013 | build/lib.linux-x86_64-2.7/keystone/contrib/ec2/backends/kvs.py | 9 | 1820 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import kvs
class Ec2(kvs.Base):
# Public interface
def get_credential(self, credential_id):
credential_ref = self.db.get('credential-%s' % credential_id)
return credential_ref
def list_credentials(self, user_id):
credential_ids = self.db.get('credential_list', [])
rv = [self.get_credential(x) for x in credential_ids]
return [x for x in rv if x['user_id'] == user_id]
# CRUD
def create_credential(self, credential_id, credential):
self.db.set('credential-%s' % credential_id, credential)
credential_list = set(self.db.get('credential_list', []))
credential_list.add(credential_id)
self.db.set('credential_list', list(credential_list))
return credential
def delete_credential(self, credential_id):
# This will ensure credential-%s is here before deleting
self.db.get('credential-%s' % credential_id)
self.db.delete('credential-%s' % credential_id)
credential_list = set(self.db.get('credential_list', []))
credential_list.remove(credential_id)
self.db.set('credential_list', list(credential_list))
return None
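# Illustrative round trip (hedged: assumes kvs.Base provides the in-memory
# ``self.db`` store used by the other KVS backends):
#
#     backend = Ec2()
#     backend.create_credential('cred1', {'user_id': 'u1'})
#     assert backend.list_credentials('u1')[0]['user_id'] == 'u1'
#     backend.delete_credential('cred1')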
| apache-2.0 | 8,435,911,308,602,054,000 | 38.565217 | 75 | 0.685714 | false |
TMU-VHDL-team2/sqrt | wiki_data/a_dec.py | 1 | 1269 | #!/usr/bin/env python3
def func1():
if j < 0:
if (32768 >> (-j-1)) < x1:
return y2
else:
return x1 << -j
else:
return x1 >> j
def func2():
if j < 0:
return y >> -j
else:
return y << j
x1 = int(input())
x0 = 0
a = 0
y = 0
n = 0
c = 0
print(hex(x1))
t = x1
while t > 0:
t >>= 1
n += 1
n += 16
n += n & 1
for i in range(n, -1, -2):
j = i - 16
a <<= 1
y <<= 1
if y > 65535:
        y %= 65536  # keep the lower 16 bits
y2 = (1 | y)
c = True
    f1 = func1()
    if f1 < y2:
if x0 >> i < y2:
c = False
if c:
a += 1
y += 1
x1 -= func2()
        x0 -= (y << i) % 65536  # keep the lower 16 bits
        if x0 < 0:
            x1 -= 1
            x0 += 65536  # keep the lower 16 bits
y += 1
print('i, c, a, y, x1, x0, func1, x0>>i, y2 :',
"{0:2d}".format(i),
"{0:2d}".format(c),
"{0:6d}".format(a),
"{0:8s}".format(hex(y)),
"{0:8s}".format(hex(x1)),
"{0:8s}".format(hex(x0)),
"{0:6d}".format(f1),
"{0:6d}".format(x0>>i),
"{0:6d}".format(y2)
)
print(hex(a), ' = ', a / 256.)
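# Hedged cross-check, added for illustration and never called by the script:
# the digit-by-digit loop above appears to compute sqrt(x) in 8.8 fixed point,
# so the printed a / 256. should track math.sqrt of the original input.
def _reference_sqrt_check(x, a, tol=2.0 / 256.0):
    import math
    return abs(a / 256.0 - math.sqrt(x)) <= tol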
| mit | -6,097,552,678,139,865,000 | 17.590909 | 51 | 0.356968 | false |
openid/python-openid | examples/djopenid/consumer/views.py | 1 | 8229 | from __future__ import unicode_literals
import six
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from openid.consumer import consumer
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import ax, pape, sreg
from openid.server.trustroot import RP_RETURN_TO_URL_TYPE
from openid.yadis.constants import YADIS_HEADER_NAME
from .. import util
PAPE_POLICIES = [
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
]
# List of (name, uri) for use in generating the request form.
POLICY_PAIRS = [(p, getattr(pape, p))
for p in PAPE_POLICIES]
def getOpenIDStore():
"""
Return an OpenID store object fit for the currently-chosen
database backend, if any.
"""
return util.getOpenIDStore('/tmp/djopenid_c_store', 'c_')
def getConsumer(request):
"""
Get a Consumer object to perform OpenID authentication.
"""
return consumer.Consumer(request.session, getOpenIDStore())
def renderIndexPage(request, **template_args):
template_args['consumer_url'] = request.build_absolute_uri(reverse('consumer:index'))
template_args['pape_policies'] = POLICY_PAIRS
response = render(request, 'consumer/index.html', template_args)
response[YADIS_HEADER_NAME] = request.build_absolute_uri(reverse('consumer:xrds'))
return response
def startOpenID(request):
"""
Start the OpenID authentication process. Renders an
authentication form and accepts its POST.
* Renders an error message if OpenID cannot be initiated
* Requests some Simple Registration data using the OpenID
library's Simple Registration machinery
* Generates the appropriate trust root and return URL values for
this application (tweak where appropriate)
* Generates the appropriate redirect based on the OpenID protocol
version.
"""
if request.POST:
# Start OpenID authentication.
openid_url = request.POST['openid_identifier']
c = getConsumer(request)
error = None
try:
auth_request = c.begin(openid_url)
except DiscoveryFailure as e:
# Some other protocol-level failure occurred.
error = "OpenID discovery error: %s" % (six.text_type(e),)
if error:
# Render the page with an error.
return renderIndexPage(request, error=error)
# Add Simple Registration request information. Some fields
# are optional, some are required. It's possible that the
# server doesn't support sreg or won't return any of the
# fields.
sreg_request = sreg.SRegRequest(optional=['email', 'nickname'],
required=['dob'])
auth_request.addExtension(sreg_request)
# Add Attribute Exchange request information.
ax_request = ax.FetchRequest()
# XXX - uses myOpenID-compatible schema values, which are
# not those listed at axschema.org.
ax_request.add(
ax.AttrInfo('http://schema.openid.net/namePerson',
required=True))
ax_request.add(
ax.AttrInfo('http://schema.openid.net/contact/web/default',
required=False, count=ax.UNLIMITED_VALUES))
auth_request.addExtension(ax_request)
# Add PAPE request information. We'll ask for
# phishing-resistant auth and display any policies we get in
# the response.
requested_policies = []
policy_prefix = 'policy_'
for k, v in six.iteritems(request.POST):
if k.startswith(policy_prefix):
policy_attr = k[len(policy_prefix):]
if policy_attr in PAPE_POLICIES:
requested_policies.append(getattr(pape, policy_attr))
if requested_policies:
pape_request = pape.Request(requested_policies)
auth_request.addExtension(pape_request)
# Compute the trust root and return URL values to build the
# redirect information.
trust_root = request.build_absolute_uri(reverse('consumer:index'))
return_to = request.build_absolute_uri(reverse('consumer:return_to'))
# Send the browser to the server either by sending a redirect
# URL or by generating a POST form.
if auth_request.shouldSendRedirect():
url = auth_request.redirectURL(trust_root, return_to)
return HttpResponseRedirect(url)
else:
# Beware: this renders a template whose content is a form
# and some javascript to submit it upon page load. Non-JS
# users will have to click the form submit button to
# initiate OpenID authentication.
form_id = 'openid_message'
form_html = auth_request.formMarkup(trust_root, return_to,
False, {'id': form_id})
return render(request, 'consumer/request_form.html', {'html': form_html})
return renderIndexPage(request)
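# Note on the redirect-vs-form branch above (hedged summary): OpenID 1
# requests fit in a GET redirect, while OpenID 2 checkid requests can exceed
# practical URL length limits, so the library may ask for an auto-submitting
# POST form instead.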
def finishOpenID(request):
"""
Finish the OpenID authentication process. Invoke the OpenID
library with the response from the OpenID server and render a page
detailing the result.
"""
result = {}
# Because the object containing the query parameters is a
# MultiValueDict and the OpenID library doesn't allow that, we'll
# convert it to a normal dict.
# OpenID 2 can send arguments as either POST body or GET query
# parameters.
request_args = util.normalDict(request.GET)
if request.method == 'POST':
request_args.update(util.normalDict(request.POST))
if request_args:
c = getConsumer(request)
# Get a response object indicating the result of the OpenID
# protocol.
return_to = request.build_absolute_uri(reverse('consumer:return_to'))
response = c.complete(request_args, return_to)
# Get a Simple Registration response object if response
# information was included in the OpenID response.
sreg_response = {}
ax_items = {}
if response.status == consumer.SUCCESS:
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
ax_items = {
'fullname': ax_response.get(
'http://schema.openid.net/namePerson'),
'web': ax_response.get(
'http://schema.openid.net/contact/web/default'),
}
# Get a PAPE response object if response information was
# included in the OpenID response.
pape_response = None
if response.status == consumer.SUCCESS:
pape_response = pape.Response.fromSuccessResponse(response)
if not pape_response.auth_policies:
pape_response = None
# Map different consumer status codes to template contexts.
results = {
consumer.CANCEL:
{'message': 'OpenID authentication cancelled.'},
consumer.FAILURE:
{'error': 'OpenID authentication failed.'},
consumer.SUCCESS:
{'url': response.getDisplayIdentifier(),
'sreg': sreg_response and sreg_response.items(),
'ax': ax_items.items(),
'pape': pape_response}
}
result = results[response.status]
if isinstance(response, consumer.FailureResponse):
# In a real application, this information should be
# written to a log for debugging/tracking OpenID
# authentication failures. In general, the messages are
# not user-friendly, but intended for developers.
result['failure_reason'] = response.message
return renderIndexPage(request, **result)
def rpXRDS(request):
"""
Return a relying party verification XRDS document
"""
return_to = request.build_absolute_uri(reverse('consumer:return_to'))
return util.renderXRDS(request, [RP_RETURN_TO_URL_TYPE], [return_to])
| apache-2.0 | -2,082,647,464,285,074,200 | 35.901345 | 89 | 0.635436 | false |
jgomezdans/KaFKA | kafka/inference/solvers.py | 1 | 5323 | #!/usr/bin/env python
"""Some solvers"""
# KaFKA A fast Kalman filter implementation for raster based datasets.
# Copyright (c) 2017 J Gomez-Dans. All rights reserved.
#
# This file is part of KaFKA.
#
# KaFKA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KaFKA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KaFKA. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
#from utils import matrix_squeeze, spsolve2, reconstruct_array
# Set up logging
import logging
LOG = logging.getLogger(__name__+".solvers")
__author__ = "J Gomez-Dans"
__copyright__ = "Copyright 2017 J Gomez-Dans"
__version__ = "1.0 (09.03.2017)"
__license__ = "GPLv3"
__email__ = "[email protected]"
def variational_kalman( observations, mask, state_mask, uncertainty, H_matrix, n_params,
x_forecast, P_forecast, P_forecast_inv, the_metadata, approx_diagonal=True):
"""We can just use """
if len(H_matrix) == 2:
non_linear = True
H0, H_matrix_ = H_matrix
else:
H0 = 0.
non_linear = False
R_mat = sp.diags(uncertainty.diagonal()[state_mask.flatten()])
LOG.info("Creating linear problem")
y = observations[state_mask]
y = np.where(mask[state_mask], y, 0.)
y_orig = y*1.
if non_linear:
y = y + H_matrix_.dot(x_forecast) - H0
#Aa = matrix_squeeze (P_forecast_inv, mask=maska.ravel())
A = H_matrix_.T.dot(R_mat).dot(H_matrix_) + P_forecast_inv
b = H_matrix_.T.dot(R_mat).dot(y) + P_forecast_inv.dot (x_forecast)
b = b.astype(np.float32)
A = A.astype(np.float32)
# Here we can either do a spLU of A, and solve, or we can have a first go
# by assuming P_forecast_inv is diagonal, and use the inverse of A_approx as
# a preconditioner
LOG.info("Solving")
AI = sp.linalg.splu (A)
x_analysis = AI.solve (b)
# So retval is the solution vector and A is the Hessian
# (->inv(A) is posterior cov)
fwd_modelled = H_matrix_.dot(x_analysis-x_forecast) + H0
innovations = y_orig - fwd_modelled
#x_analysis = reconstruct_array ( x_analysis_prime, x_forecast,
# mask.ravel(), n_params=n_params)
return x_analysis, None, A, innovations, fwd_modelled
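def _dense_variational_update(H, R, P_inv, x_f, y):
    """Minimal dense sketch (illustration only, not used above) of the normal
    equations the sparse solver factorises:
        (H^T R H + P_f^{-1}) x_a = H^T R y + P_f^{-1} x_f
    Here R plays the role of the observation precision (inverse covariance),
    matching how ``uncertainty`` is used in the functions above."""
    A = H.T.dot(R).dot(H) + P_inv
    b = H.T.dot(R).dot(y) + P_inv.dot(x_f)
    return np.linalg.solve(A, b)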
def sort_band_data(H_matrix, observations, uncertainty, mask,
x0, x_forecast, state_mask):
if len(H_matrix) == 2:
non_linear = True
H0, H_matrix_ = H_matrix
else:
H0 = 0.
H_matrix_ = H_matrix
non_linear = False
R = uncertainty.diagonal()[state_mask.flatten()]
y = observations[state_mask]
y = np.where(mask[state_mask], y, 0.)
y_orig = y*1.
if non_linear:
y = y + H_matrix_.dot(x0) - H0
return H_matrix_, H0, R, y, y_orig
def variational_kalman_multiband( observations_b, mask_b, state_mask, uncertainty_b, H_matrix_b, n_params,
x0, x_forecast, P_forecast, P_forecast_inv, the_metadata_b, approx_diagonal=True):
"""We can just use """
n_bands = len(observations_b)
y = []
y_orig = []
H_matrix = []
H0 = []
R_mat = []
for i in range(n_bands):
a, b, c, d, e = sort_band_data(H_matrix_b[i], observations_b[i],
uncertainty_b[i], mask_b[i], x0, x_forecast, state_mask)
H_matrix.append(a)
H0.append(b)
R_mat.append(c)
y.append(d)
y_orig.append(e)
H_matrix_ = sp.vstack(H_matrix)
H0 = np.hstack(H0)
R_mat = sp.diags(np.hstack(R_mat))
y = np.hstack(y)
y_orig = np.hstack(y_orig)
#Aa = matrix_squeeze (P_forecast_inv, mask=maska.ravel())
A = H_matrix_.T.dot(R_mat).dot(H_matrix_) + P_forecast_inv
b = H_matrix_.T.dot(R_mat).dot(y) + P_forecast_inv.dot (x_forecast)
b = b.astype(np.float32)
A = A.astype(np.float32)
# Here we can either do a spLU of A, and solve, or we can have a first go
# by assuming P_forecast_inv is diagonal, and use the inverse of A_approx as
# a preconditioner
LOG.info("Solving")
AI = sp.linalg.splu (A)
x_analysis = AI.solve (b)
# So retval is the solution vector and A is the Hessian
# (->inv(A) is posterior cov)
fwd_modelled = H_matrix_.dot(x_analysis-x_forecast) + H0
innovations = y_orig - fwd_modelled
""" For now I am going to return innovations as y_orig - H0 as
That is what is needed by the Hessian correction. Need to discuss with Jose
What the intention for innovations is and then we can find the best solution"""
innovations = y_orig - H0
#x_analysis = reconstruct_array ( x_analysis_prime, x_forecast,
# mask.ravel(), n_params=n_params)
return x_analysis, None, A, innovations, fwd_modelled
| gpl-3.0 | 4,130,343,850,676,558,300 | 35.458904 | 106 | 0.624084 | false |
hepochen/hoedown_misaka | tests/run_tests.py | 4 | 2663 | # -*- coding: utf-8 -*-
import importlib
import inspect
import os
import sys
from itertools import chain
from os.path import dirname, join as jp, splitext
CWD = dirname(sys.modules[__name__].__file__)
sys.path.insert(0, jp(CWD, '..'))
from chibitest import runner, TestCase, Benchmark
help_message = """\
Options:
--include (-i) comma separated list of testcases
--exclude (-e) comma separated list of testcases
--benchmark (-b) run bechmarks
--list (-l) list all testcases
"""
def get_test_modules():
modules = []
for n in os.listdir(CWD):
if n.startswith('test_') and n.endswith('.py'):
n, _ = splitext(n)
modules.append(importlib.import_module(n))
return modules
def is_testcase(n):
return inspect.isclass(n) \
and issubclass(n, TestCase) \
        and n is not TestCase \
        and n is not Benchmark
def is_benchmark(n):
return inspect.isclass(n) \
and issubclass(n, Benchmark) \
        and n is not Benchmark
def get_testcases(module):
return [(testcase.__name__, testcase) \
for _, testcase in inspect.getmembers(module, is_testcase)]
def run_testcases(testcases, benchmark=False, include=[], exclude=[]):
if include:
testcases = [n for n in testcases if n[0] in include]
if exclude:
testcases = [n for n in testcases if not n[0] in exclude]
if benchmark:
testcases = [n[1] for n in testcases if is_benchmark(n[1])]
else:
testcases = [n[1] for n in testcases if not is_benchmark(n[1])]
runner(testcases)
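# Example invocations (hedged sketch of the option handling below):
#
#     python run_tests.py                    # run all testcases
#     python run_tests.py -i TestA,TestB     # only the named testcases
#     python run_tests.py -b -e SlowBench    # benchmarks, excluding one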
if __name__ == '__main__':
testcases = list(chain(*map(get_testcases, get_test_modules())))
include = []
exclude = []
benchmark = False
if len(sys.argv) >= 2:
if sys.argv[1] in ('-l', '--list'):
for name, testcase in testcases:
print(name)
sys.exit(0)
elif sys.argv[1] in ('-h', '--help'):
print(help_message)
sys.exit(0)
else:
last_arg = '--include'
for arg in sys.argv[1:]:
if arg in ('-i', '--include', '-e', '--exclude'):
last_arg = arg
elif not arg.startswith('-'): # - or --
arg = [n for n in arg.split(',') if n]
if last_arg in ('-i', '--include'):
include.extend(arg)
elif last_arg in ('-e', '--exclude'):
exclude.extend(arg)
if '-b' in sys.argv[1:] or '--benchmark' in sys.argv[1:]:
benchmark = True
run_testcases(testcases, benchmark, include, exclude)
| mit | -2,168,864,859,217,924,400 | 26.453608 | 71 | 0.553136 | false |
praekelt/malaria24-django | malaria24/ona/tests/test_admin.py | 1 | 6051 | from django.contrib.auth.models import User
from django.core import urlresolvers
from django.db.models.signals import post_save
from django.test import override_settings
from mock import patch
from malaria24.ona.models import (
ReportedCase,
new_case_alert_ehps,
new_case_alert_mis, new_case_alert_jembi)
from .base import MalariaTestCase
class ReportedCaseAdminTest(MalariaTestCase):
def setUp(self):
super(ReportedCaseAdminTest, self).setUp()
post_save.disconnect(
new_case_alert_ehps, sender=ReportedCase)
post_save.disconnect(
new_case_alert_mis, sender=ReportedCase)
post_save.disconnect(
new_case_alert_jembi, sender=ReportedCase)
User.objects.create_superuser(
username='test',
password='test',
email='[email protected]'
)
self.client.login(username='test', password='test')
def tearDown(self):
super(ReportedCaseAdminTest, self).tearDown()
post_save.connect(
new_case_alert_ehps, sender=ReportedCase)
post_save.connect(
new_case_alert_mis, sender=ReportedCase)
post_save.connect(
new_case_alert_jembi, sender=ReportedCase)
@override_settings(FORWARD_TO_JEMBI=False)
@patch('malaria24.ona.tasks.compile_and_send_jembi.delay')
def test_setting_disables_send_to_jembi(self, mock_task):
case = self.mk_case(first_name="John", last_name="Day", gender="male",
msisdn="0711111111", landmark_description="None",
id_type="said", case_number="20171214-123456-42",
abroad="No", locality="None",
reported_by="+27721111111",
sa_id_number="5608071111083",
landmark="School", facility_code="123456")
case.save()
case.digest = None
data = {
'action': 'send_jembi_alert',
'_selected_action': [case.pk]
}
list_url = urlresolvers.reverse('admin:ona_reportedcase_changelist')
response = self.client.post(list_url, data, follow=True)
        mock_task.assert_not_called()
self.assertContains(response, "Sending to Jembi currently disabled.")
@patch('malaria24.ona.tasks.compile_and_send_jembi.delay')
def test_only_unsent_cases_sent_to_jembi(self, mock_task):
case1 = self.mk_case(first_name="John", last_name="Day", gender="male",
msisdn="0711111111", landmark_description="None",
id_type="said", case_number="20171214-123456-42",
abroad="No", locality="None",
reported_by="+27721111111",
sa_id_number="5608071111083",
landmark="School", facility_code="123456",
jembi_alert_sent=True)
case2 = self.mk_case(first_name="Mark", last_name="Day", gender="male",
msisdn="0711111112", landmark_description="None",
id_type="said", case_number="20171214-123456-56",
abroad="No", locality="None",
reported_by="+27721111112",
sa_id_number="5610031111083",
landmark="School", facility_code="123456")
case1.save()
case2.save()
data = {
'action': 'send_jembi_alert',
'_selected_action': [case1.pk, case2.pk]
}
list_url = urlresolvers.reverse('admin:ona_reportedcase_changelist')
response = self.client.post(list_url, data, follow=True)
mock_task.assert_called_with(case2.pk)
self.assertContains(response,
"Forwarding all unsent cases to Jembi (total 1).")
@patch('malaria24.ona.tasks.compile_and_send_jembi.delay')
def test_task_called_for_each_selected_unsent_case(self, mock_task):
case1 = self.mk_case(first_name="John", last_name="Day", gender="male",
msisdn="0711111111", landmark_description="None",
id_type="said", case_number="20171214-123456-42",
abroad="No", locality="None",
reported_by="+27721111111",
sa_id_number="5608071111083",
landmark="School", facility_code="123456")
case2 = self.mk_case(first_name="Mark", last_name="Day", gender="male",
msisdn="0711111112", landmark_description="None",
id_type="said", case_number="20171214-123456-56",
abroad="No", locality="None",
reported_by="+27721111112",
sa_id_number="5610031111083",
landmark="School", facility_code="123456")
case3 = self.mk_case(first_name="Luke", last_name="Day", gender="male",
msisdn="0711111113", landmark_description="None",
id_type="said", case_number="20171214-123456-64",
abroad="No", locality="None",
reported_by="+27721111113",
sa_id_number="8112051111083",
landmark="School", facility_code="123456")
case1.save()
case2.save()
case3.save()
data = {
'action': 'send_jembi_alert',
'_selected_action': [case1.pk, case2.pk]
}
list_url = urlresolvers.reverse('admin:ona_reportedcase_changelist')
response = self.client.post(list_url, data, follow=True)
mock_task.assert_any_call(case1.pk)
mock_task.assert_any_call(case2.pk)
self.assertContains(response,
"Forwarding all unsent cases to Jembi (total 2).")
| bsd-2-clause | -7,128,659,252,724,081,000 | 47.408 | 79 | 0.54239 | false |
yongshengwang/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/find_template.py | 35 | 1196 | from django.core.management.base import LabelCommand
from django.template import loader
from django.template import TemplateDoesNotExist
import sys
from django_extensions.management.utils import signalcommand
def get_template_path(path):
try:
template = loader.find_template(path)
if template[1]:
return template[1].name
        # work around https://code.djangoproject.com/ticket/17199 issue
for template_loader in loader.template_source_loaders:
try:
source, origin = template_loader.load_template_source(path)
return origin
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(path)
except TemplateDoesNotExist:
return None
class Command(LabelCommand):
help = "Finds the location of the given template by resolving its path"
args = "[template_path]"
label = 'template path'
@signalcommand
def handle_label(self, template_path, **options):
path = get_template_path(template_path)
if path is None:
sys.stderr.write("No template found\n")
sys.exit(1)
else:
print(path)
| apache-2.0 | 7,475,929,403,546,993,000 | 30.473684 | 75 | 0.65301 | false |
BigBrother-International/gst-cerbero | cerbero/packages/osx/buildtools.py | 3 | 3083 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
from cerbero.packages.osx.info_plist import ComponentPropertyPlist
from cerbero.utils import shell
class PackageBuild(object):
''' Wrapper for the packagebuild application '''
CMD = 'pkgbuild'
def create_package(self, root, pkg_id, version, title, output_file,
destination='/opt/', scripts_path=None):
'''
        Creates an OS X flat package, where all files are properly bundled in a
directory that is set as the package root
@param root: root path
@type root: str
        @param pkg_id: package identifier
@type pkg_id: str
@param version: package version
@type version: str
@param title: package title
@type title: str
@param output_file: path of the output file
@type output_file: str
@param destination: installation path
@type destination: str
@param scripts_path: relative path for package scripts
@type scripts_path: str
'''
args = {'root': root, 'identifier': pkg_id, 'version': version,
'install-location': destination}
if scripts_path is not None:
args['scripts'] = scripts_path
#plist = tempfile.NamedTemporaryFile()
#cpl = ComponentPropertyPlist(title, os.path.basename(output_file))
#cpl.save(plist.name)
#args['component-plist'] = plist.name
shell.call(self._cmd_with_args(args, output_file))
def _cmd_with_args(self, args, output):
args_str = ''
for k, v in args.iteritems():
args_str += " --%s '%s'" % (k, v)
return '%s %s %s' % (self.CMD, args_str, output)
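    # Illustrative call (hedged: identifiers and paths are made up):
    #
    #     PackageBuild().create_package('/tmp/root', 'com.example.pkg',
    #                                   '1.0', 'Example', '/tmp/example.pkg')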
class ProductBuild (object):
''' Wrapper for the packagebuild application '''
CMD = 'productbuild'
def create_app_package(self, app_bundle, output):
shell.call("%s --component %s /Applications %s"
% (self.CMD, app_bundle, output))
def create_package(self, distribution, output, package_path=None):
cmd = "%s --distribution %s %s" % (self.CMD, distribution, output)
for p in package_path:
cmd += ' --package-path %s' % p
shell.call(cmd)
| lgpl-2.1 | -8,094,534,842,700,599,000 | 36.597561 | 78 | 0.64807 | false |
hramrach/osc | tests/test_addfiles.py | 15 | 3192 | import osc.core
import osc.oscerr
import os
import sys
from common import OscTestCase
FIXTURES_DIR = os.path.join(os.getcwd(), 'addfile_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestAddFiles)
class TestAddFiles(OscTestCase):
def _get_fixtures_dir(self):
return FIXTURES_DIR
def testSimpleAdd(self):
"""add one file ('toadd1') to the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.addfile('toadd1')
exp = 'A toadd1\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self._check_status(p, 'toadd1', 'A')
self._check_addlist('toadd1\n')
def testSimpleMultipleAdd(self):
"""add multiple files ('toadd1', 'toadd2') to the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.addfile('toadd1')
p.addfile('toadd2')
exp = 'A toadd1\nA toadd2\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd2')))
self._check_status(p, 'toadd1', 'A')
self._check_status(p, 'toadd2', 'A')
self._check_addlist('toadd1\ntoadd2\n')
def testAddVersionedFile(self):
"""add a versioned file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.PackageFileConflict, p.addfile, 'merge')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_added')))
self._check_status(p, 'merge', ' ')
def testAddUnversionedFileTwice(self):
"""add the same file twice"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.addfile('toadd1')
self.assertRaises(osc.oscerr.PackageFileConflict, p.addfile, 'toadd1')
exp = 'A toadd1\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self._check_status(p, 'toadd1', 'A')
self._check_addlist('toadd1\n')
def testReplace(self):
"""replace a deleted file ('foo')"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
open('foo', 'w').write('replaced file\n')
p.addfile('foo')
exp = 'A foo\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self.assertNotEqual(open(os.path.join('.osc', 'foo'), 'r').read(), 'replaced file\n')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_status(p, 'foo', 'R')
self._check_addlist('foo\n')
def testAddNonExistentFile(self):
"""add a non existent file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.OscIOError, p.addfile, 'doesnotexist')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_added')))
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-2.0 | -3,599,170,910,826,904,600 | 36.552941 | 93 | 0.593358 | false |
ubc/edx-platform | common/djangoapps/course_modes/tests/test_views.py | 64 | 15334 | import unittest
import decimal
import ddt
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from util.testing import UrlResetMixin
from embargo.test_utils import restrict_course
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.tests.factories import CourseModeFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from student.models import CourseEnrollment
from course_modes.models import CourseMode, Mode
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseModeViewTest(UrlResetMixin, ModuleStoreTestCase):
@patch.dict(settings.FEATURES, {'MODE_CREATION_FOR_TESTING': True})
def setUp(self):
super(CourseModeViewTest, self).setUp('course_modes.urls')
self.course = CourseFactory.create()
self.user = UserFactory.create(username="Bob", email="[email protected]", password="edx")
self.client.login(username=self.user.username, password="edx")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.data(
# is_active?, enrollment_mode, redirect?
(True, 'verified', True),
(True, 'honor', False),
(True, 'audit', False),
(False, 'verified', False),
(False, 'honor', False),
(False, 'audit', False),
(False, None, False),
)
@ddt.unpack
def test_redirect_to_dashboard(self, is_active, enrollment_mode, redirect):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Enroll the user in the test course
if enrollment_mode is not None:
CourseEnrollmentFactory(
is_active=is_active,
mode=enrollment_mode,
course_id=self.course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Check whether we were correctly redirected
if redirect:
self.assertRedirects(response, reverse('dashboard'))
else:
self.assertEquals(response.status_code, 200)
def test_no_id_redirect(self):
# Create the course modes
CourseModeFactory(mode_slug=CourseMode.NO_ID_PROFESSIONAL_MODE, course_id=self.course.id, min_price=100)
# Enroll the user in the test course
CourseEnrollmentFactory(
is_active=False,
mode=CourseMode.NO_ID_PROFESSIONAL_MODE,
course_id=self.course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Check whether we were correctly redirected
start_flow_url = reverse('verify_student_start_flow', args=[unicode(self.course.id)])
self.assertRedirects(response, start_flow_url)
def test_no_enrollment(self):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# User visits the track selection page directly without ever enrolling
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
@ddt.data(
'',
'1,,2',
'1, ,2',
'1, 2, 3'
)
def test_suggested_prices(self, price_list):
# Create the course modes
for mode in ('audit', 'honor'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
CourseModeFactory(
mode_slug='verified',
course_id=self.course.id,
suggested_prices=price_list
)
# Enroll the user in the test course to emulate
# automatic enrollment
CourseEnrollmentFactory(
is_active=True,
course_id=self.course.id,
user=self.user
)
# Verify that the prices render correctly
response = self.client.get(
reverse('course_modes_choose', args=[unicode(self.course.id)]),
follow=False,
)
self.assertEquals(response.status_code, 200)
# TODO: Fix it so that response.templates works w/ mako templates, and then assert
# that the right template rendered
@ddt.data(
(['honor', 'verified', 'credit'], True),
(['honor', 'verified'], False),
)
@ddt.unpack
def test_credit_upsell_message(self, available_modes, show_upsell):
# Create the course modes
for mode in available_modes:
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Check whether credit upsell is shown on the page
# This should *only* be shown when a credit mode is available
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
if show_upsell:
self.assertContains(response, "Credit")
else:
self.assertNotContains(response, "Credit")
@ddt.data('professional', 'no-id-professional')
def test_professional_enrollment(self, mode):
# The only course mode is professional ed
CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=1)
# Go to the "choose your track" page
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(choose_track_url)
# Since the only available track is professional ed, expect that
# we're redirected immediately to the start of the payment flow.
start_flow_url = reverse('verify_student_start_flow', args=[unicode(self.course.id)])
self.assertRedirects(response, start_flow_url)
# Now enroll in the course
CourseEnrollmentFactory(
user=self.user,
is_active=True,
mode=mode,
course_id=unicode(self.course.id),
)
# Expect that this time we're redirected to the dashboard (since we're already registered)
response = self.client.get(choose_track_url)
self.assertRedirects(response, reverse('dashboard'))
# Mapping of course modes to the POST parameters sent
# when the user chooses that mode.
POST_PARAMS_FOR_COURSE_MODE = {
'honor': {'honor_mode': True},
'verified': {'verified_mode': True, 'contribution': '1.23'},
'unsupported': {'unsupported_mode': True},
}
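    # Editorial usage sketch (added): each entry above is the POST body a
    # test sends to the track-selection view, e.g.:
    #
    #     url = reverse('course_modes_choose', args=[unicode(self.course.id)])
    #     response = self.client.post(
    #         url, self.POST_PARAMS_FOR_COURSE_MODE['verified'])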
@ddt.data(
('honor', 'dashboard'),
('verified', 'start-flow'),
)
@ddt.unpack
def test_choose_mode_redirect(self, course_mode, expected_redirect):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
min_price = 0 if course_mode in ["honor", "audit"] else 1
CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=min_price)
# Choose the mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE[course_mode])
# Verify the redirect
if expected_redirect == 'dashboard':
redirect_url = reverse('dashboard')
elif expected_redirect == 'start-flow':
redirect_url = reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(self.course.id)}
)
else:
self.fail("Must provide a valid redirect URL name")
self.assertRedirects(response, redirect_url)
def test_remember_donation_for_course(self):
# Create the course modes
for mode in ('honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Choose the mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['verified'])
# Expect that the contribution amount is stored in the user's session
self.assertIn('donation_for_course', self.client.session)
self.assertIn(unicode(self.course.id), self.client.session['donation_for_course'])
actual_amount = self.client.session['donation_for_course'][unicode(self.course.id)]
expected_amount = decimal.Decimal(self.POST_PARAMS_FOR_COURSE_MODE['verified']['contribution'])
self.assertEqual(actual_amount, expected_amount)
def test_successful_honor_enrollment(self):
# Create the course modes
for mode in ('honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Enroll the user in the default mode (honor) to emulate
# automatic enrollment
params = {
'enrollment_action': 'enroll',
'course_id': unicode(self.course.id)
}
self.client.post(reverse('change_enrollment'), params)
# Explicitly select the honor mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['honor'])
# Verify that the user's enrollment remains unchanged
mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course.id)
self.assertEqual(mode, 'honor')
self.assertEqual(is_active, True)
def test_unsupported_enrollment_mode_failure(self):
# Create the supported course modes
for mode in ('honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Choose an unsupported mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['unsupported'])
self.assertEqual(400, response.status_code)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_default_mode_creation(self):
# Hit the mode creation endpoint with no querystring params, to create an honor mode
url = reverse('create_mode', args=[unicode(self.course.id)])
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
expected_mode = [Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None)]
course_mode = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_mode, expected_mode)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.data(
(u'verified', u'Verified Certificate', 10, '10,20,30', 'usd'),
(u'professional', u'Professional Education', 100, '100,200', 'usd'),
)
@ddt.unpack
def test_verified_mode_creation(self, mode_slug, mode_display_name, min_price, suggested_prices, currency):
parameters = {}
parameters['mode_slug'] = mode_slug
parameters['mode_display_name'] = mode_display_name
parameters['min_price'] = min_price
parameters['suggested_prices'] = suggested_prices
parameters['currency'] = currency
url = reverse('create_mode', args=[unicode(self.course.id)])
response = self.client.get(url, parameters)
self.assertEquals(response.status_code, 200)
expected_mode = [Mode(mode_slug, mode_display_name, min_price, suggested_prices, currency, None, None, None)]
course_mode = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_mode, expected_mode)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_multiple_mode_creation(self):
# Create an honor mode
base_url = reverse('create_mode', args=[unicode(self.course.id)])
self.client.get(base_url)
# Excluding the currency parameter implicitly tests the mode creation endpoint's ability to
# use default values when parameters are partially missing.
parameters = {}
parameters['mode_slug'] = u'verified'
parameters['mode_display_name'] = u'Verified Certificate'
parameters['min_price'] = 10
parameters['suggested_prices'] = '10,20'
# Create a verified mode
url = reverse('create_mode', args=[unicode(self.course.id)])
self.client.get(url, parameters)
honor_mode = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None)
verified_mode = Mode(u'verified', u'Verified Certificate', 10, '10,20', 'usd', None, None, None)
expected_modes = [honor_mode, verified_mode]
course_modes = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_modes, expected_modes)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@patch.dict(settings.FEATURES, {"IS_EDX_DOMAIN": True})
def test_hide_nav(self):
# Create the course modes
for mode in ["honor", "verified"]:
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Load the track selection page
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Verify that the header navigation links are hidden for the edx.org version
self.assertNotContains(response, "How it Works")
self.assertNotContains(response, "Find courses")
self.assertNotContains(response, "Schools & Partners")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TrackSelectionEmbargoTest(UrlResetMixin, ModuleStoreTestCase):
"""Test embargo restrictions on the track selection page. """
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(TrackSelectionEmbargoTest, self).setUp('embargo')
# Create a course and course modes
self.course = CourseFactory.create()
CourseModeFactory(mode_slug='honor', course_id=self.course.id)
CourseModeFactory(mode_slug='verified', course_id=self.course.id, min_price=10)
# Create a user and log in
self.user = UserFactory.create(username="Bob", email="[email protected]", password="edx")
self.client.login(username=self.user.username, password="edx")
# Construct the URL for the track selection page
self.url = reverse('course_modes_choose', args=[unicode(self.course.id)])
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_restrict(self):
with restrict_course(self.course.id) as redirect_url:
response = self.client.get(self.url)
self.assertRedirects(response, redirect_url)
def test_embargo_allow(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
| agpl-3.0 | 1,549,863,987,658,812,700 | 40.443243 | 117 | 0.646472 | false |
wolfier/incubator-airflow | airflow/sensors/base_sensor_operator.py | 5 | 2739 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from time import sleep
from airflow.exceptions import AirflowException, AirflowSensorTimeout, \
AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
"""
    Sensor operators are derived from this class and inherit these attributes.
    Sensor operators keep executing at a time interval and succeed when
    a criterion is met and fail if and when they time out.
    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each try
    :type poke_interval: int
    :param timeout: Time, in seconds, before the task times out and fails.
:type timeout: int
"""
ui_color = '#e6f1f2'
@apply_defaults
def __init__(self,
poke_interval=60,
timeout=60 * 60 * 24 * 7,
soft_fail=False,
*args,
**kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
def poke(self, context):
"""
Function that the sensors defined while deriving this class should
override.
"""
raise AirflowException('Override me.')
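    # --- Editorial example (added; not part of the original module): a
    # minimal subclass with a hypothetical `filepath` argument. poke() is
    # polled every `poke_interval` seconds by execute() below; returning
    # True ends the loop, while exceeding `timeout` raises
    # AirflowSensorTimeout (or AirflowSkipException when soft_fail=True).
    #
    #     class FileExistsSensor(BaseSensorOperator):
    #         def __init__(self, filepath, *args, **kwargs):
    #             super(FileExistsSensor, self).__init__(*args, **kwargs)
    #             self.filepath = filepath
    #
    #         def poke(self, context):
    #             import os.path
    #             return os.path.exists(self.filepath)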
def execute(self, context):
started_at = timezone.utcnow()
while not self.poke(context):
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
if self.soft_fail:
raise AirflowSkipException('Snap. Time is OUT.')
else:
raise AirflowSensorTimeout('Snap. Time is OUT.')
sleep(self.poke_interval)
self.log.info("Success criteria met. Exiting.")
| apache-2.0 | 1,882,598,029,010,323,500 | 35.52 | 79 | 0.665571 | false |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/web/test/test_xml.py | 10 | 41831 | # -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Some fairly inadequate testcases for Twisted XML support.
"""
from twisted.trial.unittest import TestCase
from twisted.web import sux
from twisted.web import microdom
from twisted.web import domhelpers
class Sux0r(sux.XMLParser):
def __init__(self):
self.tokens = []
def getTagStarts(self):
return [token for token in self.tokens if token[0] == 'start']
def gotTagStart(self, name, attrs):
self.tokens.append(("start", name, attrs))
def gotText(self, text):
self.tokens.append(("text", text))
class SUXTests(TestCase):
def testBork(self):
s = "<bork><bork><bork>"
ms = Sux0r()
ms.connectionMade()
ms.dataReceived(s)
self.assertEqual(len(ms.getTagStarts()),3)
class MicroDOMTests(TestCase):
def test_leadingTextDropping(self):
"""
        Make sure that if there's no top-level node, lenient mode won't
drop leading text that's outside of any elements.
"""
s = "Hi orders! <br>Well. <br>"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
'<html>Hi orders! <br />Well. <br /></html>')
def test_trailingTextDropping(self):
"""
        Ensure that *trailing* text in a malformed document with no
        top-level element is not dropped.
"""
s = "<br>Hi orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
'<html><br />Hi orders!</html>')
def test_noTags(self):
"""
A string with nothing that looks like a tag at all should just
be parsed as body text.
"""
s = "Hi orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
"<html>Hi orders!</html>")
def test_surroundingCrap(self):
"""
        If a document is surrounded by non-xml text, the text should
        remain in the XML.
"""
s = "Hi<br> orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
"<html>Hi<br /> orders!</html>")
def testCaseSensitiveSoonCloser(self):
s = """
<HTML><BODY>
<P ALIGN="CENTER">
<A HREF="http://www.apache.org/"><IMG SRC="/icons/apache_pb.gif"></A>
</P>
<P>
This is an insane set of text nodes that should NOT be gathered under
the A tag above.
</P>
</BODY></HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'a')
n = domhelpers.gatherTextNodes(l[0],1).replace(' ',' ')
self.assertEqual(n.find('insane'), -1)
def test_lenientParenting(self):
"""
Test that C{parentNode} attributes are set to meaningful values when
we are parsing HTML that lacks a root node.
"""
# Spare the rod, ruin the child.
s = "<br/><br/>"
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertIdentical(d.documentElement,
d.documentElement.firstChild().parentNode)
def test_lenientParentSingle(self):
"""
Test that the C{parentNode} attribute is set to a meaningful value
when we parse an HTML document that has a non-Element root node.
"""
s = "Hello"
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertIdentical(d.documentElement,
d.documentElement.firstChild().parentNode)
def testUnEntities(self):
s = """
<HTML>
This HTML goes between Stupid <=CrAzY!=> Dumb.
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
n = domhelpers.gatherTextNodes(d)
self.assertNotEqual(n.find('>'), -1)
def testEmptyError(self):
self.assertRaises(sux.ParseError, microdom.parseString, "")
def testTameDocument(self):
s = """
<test>
<it>
<is>
<a>
test
</a>
</is>
</it>
</test>
"""
d = microdom.parseString(s)
self.assertEqual(
domhelpers.gatherTextNodes(d.documentElement).strip() ,'test')
def testAwfulTagSoup(self):
s = """
<html>
<head><title> I send you this message to have your advice!!!!</titl e
</headd>
<body bgcolor alink hlink vlink>
<h1><BLINK>SALE</blINK> TWENTY MILLION EMAILS & FUR COAT NOW
FREE WITH `ENLARGER'</h1>
YES THIS WONDERFUL AWFER IS NOW HERER!!!
<script LANGUAGE="javascript">
function give_answers() {
if (score < 70) {
alert("I hate you");
}}
</script><a href=/foo.com/lalal name=foo>lalal</a>
</body>
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'blink')
self.assertEqual(len(l), 1)
def testScriptLeniency(self):
s = """
<script>(foo < bar) and (bar > foo)</script>
<script language="javascript">foo </scrip bar </script>
<script src="foo">
<script src="foo">baz</script>
<script /><script></script>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertEqual(d.firstChild().firstChild().firstChild().data,
"(foo < bar) and (bar > foo)")
self.assertEqual(
d.firstChild().getElementsByTagName("script")[1].firstChild().data,
"foo </scrip bar ")
def testScriptLeniencyIntelligence(self):
# if there is comment or CDATA in script, the autoquoting in bEL mode
# should not happen
s = """<script><!-- lalal --></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script><![CDATA[lalal]]></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script> // <![CDATA[
lalal
//]]></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
def testPreserveCase(self):
s = '<eNcApSuLaTe><sUxor></sUxor><bOrk><w00T>TeXt</W00t></BoRk></EnCaPsUlAtE>'
s2 = s.lower().replace('text', 'TeXt')
# these are the only two option permutations that *can* parse the above
d = microdom.parseString(s, caseInsensitive=1, preserveCase=1)
d2 = microdom.parseString(s, caseInsensitive=1, preserveCase=0)
# caseInsensitive=0 preserveCase=0 is not valid, it's converted to
# caseInsensitive=0 preserveCase=1
d3 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d4 = microdom.parseString(s2, caseInsensitive=1, preserveCase=0)
d5 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
# this is slightly contrived, toxml() doesn't need to be identical
# for the documents to be equivalent (i.e. <b></b> to <b/>),
# however this assertion tests preserving case for start and
# end tags while still matching stuff like <bOrk></BoRk>
self.assertEqual(d.documentElement.toxml(), s)
self.assertTrue(d.isEqualToDocument(d2), "%r != %r" % (d.toxml(), d2.toxml()))
self.assertTrue(d2.isEqualToDocument(d3), "%r != %r" % (d2.toxml(), d3.toxml()))
# caseInsensitive=0 on the left, NOT perserveCase=1 on the right
## XXX THIS TEST IS TURNED OFF UNTIL SOMEONE WHO CARES ABOUT FIXING IT DOES
#self.assertFalse(d3.isEqualToDocument(d2), "%r == %r" % (d3.toxml(), d2.toxml()))
self.assertTrue(d3.isEqualToDocument(d4), "%r != %r" % (d3.toxml(), d4.toxml()))
self.assertTrue(d4.isEqualToDocument(d5), "%r != %r" % (d4.toxml(), d5.toxml()))
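    # Editorial summary (added) of the parse-flag behaviour exercised above:
    #   caseInsensitive=1, preserveCase=1 -> tags match case-blind; output
    #                                        keeps the original case
    #   caseInsensitive=1, preserveCase=0 -> tags match case-blind; output
    #                                        is lowercased
    #   caseInsensitive=0, preserveCase=0 -> not valid; converted to
    #                                        caseInsensitive=0, preserveCase=1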
def testDifferentQuotes(self):
s = '<test a="a" b=\'b\' />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.getAttribute('a'), 'a')
self.assertEqual(e.getAttribute('b'), 'b')
def testLinebreaks(self):
s = '<test \na="a"\n\tb="#b" />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.getAttribute('a'), 'a')
self.assertEqual(e.getAttribute('b'), '#b')
def testMismatchedTags(self):
for s in '<test>', '<test> </tset>', '</test>':
self.assertRaises(microdom.MismatchedTags, microdom.parseString, s)
def testComment(self):
s = "<bar><!--<foo />--></bar>"
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.nodeName, "bar")
c = e.childNodes[0]
self.assertTrue(isinstance(c, microdom.Comment))
self.assertEqual(c.value, "<foo />")
c2 = c.cloneNode()
self.assertTrue(c is not c2)
self.assertEqual(c2.toxml(), "<!--<foo />-->")
def testText(self):
d = microdom.parseString("<bar>xxxx</bar>").documentElement
text = d.childNodes[0]
self.assertTrue(isinstance(text, microdom.Text))
self.assertEqual(text.value, "xxxx")
clone = text.cloneNode()
self.assertTrue(clone is not text)
self.assertEqual(clone.toxml(), "xxxx")
def testEntities(self):
nodes = microdom.parseString("<b>&AB;</b>").documentElement.childNodes
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].data, "&")
self.assertEqual(nodes[1].data, "AB;")
self.assertEqual(nodes[0].cloneNode().toxml(), "&")
for n in nodes:
self.assertTrue(isinstance(n, microdom.EntityReference))
def testCData(self):
s = '<x><![CDATA[</x>\r\n & foo]]></x>'
cdata = microdom.parseString(s).documentElement.childNodes[0]
self.assertTrue(isinstance(cdata, microdom.CDATASection))
self.assertEqual(cdata.data, "</x>\r\n & foo")
self.assertEqual(cdata.cloneNode().toxml(), "<![CDATA[</x>\r\n & foo]]>")
def testSingletons(self):
s = "<foo><b/><b /><b\n/></foo>"
s2 = "<foo><b/><b/><b/></foo>"
nodes = microdom.parseString(s).documentElement.childNodes
nodes2 = microdom.parseString(s2).documentElement.childNodes
self.assertEqual(len(nodes), 3)
for (n, n2) in zip(nodes, nodes2):
self.assertTrue(isinstance(n, microdom.Element))
self.assertEqual(n.nodeName, "b")
self.assertTrue(n.isEqualToNode(n2))
def testAttributes(self):
s = '<foo a="b" />'
node = microdom.parseString(s).documentElement
self.assertEqual(node.getAttribute("a"), "b")
self.assertEqual(node.getAttribute("c"), None)
self.assertTrue(node.hasAttribute("a"))
self.assertTrue(not node.hasAttribute("c"))
a = node.getAttributeNode("a")
self.assertEqual(a.value, "b")
node.setAttribute("foo", "bar")
self.assertEqual(node.getAttribute("foo"), "bar")
def testChildren(self):
s = "<foo><bar /><baz /><bax>foo</bax></foo>"
d = microdom.parseString(s).documentElement
self.assertEqual([n.nodeName for n in d.childNodes], ["bar", "baz", "bax"])
self.assertEqual(d.lastChild().nodeName, "bax")
self.assertEqual(d.firstChild().nodeName, "bar")
self.assertTrue(d.hasChildNodes())
self.assertTrue(not d.firstChild().hasChildNodes())
def testMutate(self):
s = "<foo />"
s1 = '<foo a="b"><bar/><foo/></foo>'
s2 = '<foo a="b">foo</foo>'
d = microdom.parseString(s).documentElement
d1 = microdom.parseString(s1).documentElement
d2 = microdom.parseString(s2).documentElement
d.appendChild(d.cloneNode())
d.setAttribute("a", "b")
child = d.childNodes[0]
self.assertEqual(child.getAttribute("a"), None)
self.assertEqual(child.nodeName, "foo")
d.insertBefore(microdom.Element("bar"), child)
self.assertEqual(d.childNodes[0].nodeName, "bar")
self.assertEqual(d.childNodes[1], child)
for n in d.childNodes:
self.assertEqual(n.parentNode, d)
self.assertTrue(d.isEqualToNode(d1))
d.removeChild(child)
self.assertEqual(len(d.childNodes), 1)
self.assertEqual(d.childNodes[0].nodeName, "bar")
t = microdom.Text("foo")
d.replaceChild(t, d.firstChild())
self.assertEqual(d.firstChild(), t)
self.assertTrue(d.isEqualToNode(d2))
def test_replaceNonChild(self):
"""
L{Node.replaceChild} raises L{ValueError} if the node given to be
replaced is not a child of the node C{replaceChild} is called on.
"""
parent = microdom.parseString('<foo />')
orphan = microdom.parseString('<bar />')
replacement = microdom.parseString('<baz />')
self.assertRaises(
ValueError, parent.replaceChild, replacement, orphan)
def testSearch(self):
s = "<foo><bar id='me' /><baz><foo /></baz></foo>"
s2 = "<fOo><bAr id='me' /><bAz><fOO /></bAz></fOo>"
d = microdom.parseString(s)
d2 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d3 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
root = d.documentElement
self.assertEqual(root.firstChild(), d.getElementById('me'))
self.assertEqual(d.getElementsByTagName("foo"),
[root, root.lastChild().firstChild()])
root = d2.documentElement
self.assertEqual(root.firstChild(), d2.getElementById('me'))
self.assertEqual(d2.getElementsByTagName('fOo'), [root])
self.assertEqual(d2.getElementsByTagName('fOO'),
[root.lastChild().firstChild()])
self.assertEqual(d2.getElementsByTagName('foo'), [])
root = d3.documentElement
self.assertEqual(root.firstChild(), d3.getElementById('me'))
self.assertEqual(d3.getElementsByTagName('FOO'),
[root, root.lastChild().firstChild()])
self.assertEqual(d3.getElementsByTagName('fOo'),
[root, root.lastChild().firstChild()])
def testDoctype(self):
s = ('<?xml version="1.0"?>'
'<!DOCTYPE foo PUBLIC "baz" "http://www.example.com/example.dtd">'
'<foo></foo>')
s2 = '<foo/>'
d = microdom.parseString(s)
d2 = microdom.parseString(s2)
self.assertEqual(d.doctype,
'foo PUBLIC "baz" "http://www.example.com/example.dtd"')
self.assertEqual(d.toxml(), s)
self.assertFalse(d.isEqualToDocument(d2))
self.assertTrue(d.documentElement.isEqualToNode(d2.documentElement))
samples = [("<img/>", "<img />"),
("<foo A='b'>x</foo>", '<foo A="b">x</foo>'),
("<foo><BAR /></foo>", "<foo><BAR></BAR></foo>"),
("<foo>hello there & yoyoy</foo>",
"<foo>hello there & yoyoy</foo>"),
]
def testOutput(self):
for s, out in self.samples:
d = microdom.parseString(s, caseInsensitive=0)
d2 = microdom.parseString(out, caseInsensitive=0)
testOut = d.documentElement.toxml()
self.assertEqual(out, testOut)
self.assertTrue(d.isEqualToDocument(d2))
def testErrors(self):
for s in ["<foo>&am</foo>", "<foo", "<f>&</f>", "<() />"]:
self.assertRaises(Exception, microdom.parseString, s)
def testCaseInsensitive(self):
s = "<foo a='b'><BAx>x</bax></FOO>"
s2 = '<foo a="b"><bax>x</bax></foo>'
s3 = "<FOO a='b'><BAx>x</BAx></FOO>"
s4 = "<foo A='b'>x</foo>"
d = microdom.parseString(s)
d2 = microdom.parseString(s2)
d3 = microdom.parseString(s3, caseInsensitive=1)
d4 = microdom.parseString(s4, caseInsensitive=1, preserveCase=1)
d5 = microdom.parseString(s4, caseInsensitive=1, preserveCase=0)
d6 = microdom.parseString(s4, caseInsensitive=0, preserveCase=0)
out = microdom.parseString(s).documentElement.toxml()
self.assertRaises(microdom.MismatchedTags, microdom.parseString,
s, caseInsensitive=0)
self.assertEqual(out, s2)
self.assertTrue(d.isEqualToDocument(d2))
self.assertTrue(d.isEqualToDocument(d3))
self.assertTrue(d4.documentElement.hasAttribute('a'))
self.assertFalse(d6.documentElement.hasAttribute('a'))
self.assertEqual(d4.documentElement.toxml(), '<foo A="b">x</foo>')
self.assertEqual(d5.documentElement.toxml(), '<foo a="b">x</foo>')
def testEatingWhitespace(self):
s = """<hello>
</hello>"""
d = microdom.parseString(s)
self.assertTrue(not d.documentElement.hasChildNodes(),
d.documentElement.childNodes)
self.assertTrue(d.isEqualToDocument(microdom.parseString('<hello></hello>')))
def testLenientAmpersand(self):
prefix = "<?xml version='1.0'?>"
# we use <pre> so space will be preserved
for i, o in [("&", "&"),
("& ", "& "),
("&", "&"),
("&hello monkey", "&hello monkey")]:
d = microdom.parseString("%s<pre>%s</pre>"
% (prefix, i), beExtremelyLenient=1)
self.assertEqual(d.documentElement.toxml(), "<pre>%s</pre>" % o)
# non-space preserving
d = microdom.parseString("<t>hello & there</t>", beExtremelyLenient=1)
self.assertEqual(d.documentElement.toxml(), "<t>hello & there</t>")
def testInsensitiveLenient(self):
# testing issue #537
d = microdom.parseString(
"<?xml version='1.0'?><bar><xA><y>c</Xa> <foo></bar>",
beExtremelyLenient=1)
self.assertEqual(d.documentElement.firstChild().toxml(), "<xa><y>c</y></xa>")
def testLaterCloserSimple(self):
s = "<ul><li>foo<li>bar<li>baz</ul>"
d = microdom.parseString(s, beExtremelyLenient=1)
expected = "<ul><li>foo</li><li>bar</li><li>baz</li></ul>"
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testLaterCloserCaseInsensitive(self):
s = "<DL><p><DT>foo<DD>bar</DL>"
d = microdom.parseString(s, beExtremelyLenient=1)
expected = "<dl><p></p><dt>foo</dt><dd>bar</dd></dl>"
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testLaterCloserDL(self):
s = ("<dl>"
"<dt>word<dd>definition"
"<dt>word<dt>word<dd>definition<dd>definition"
"</dl>")
expected = ("<dl>"
"<dt>word</dt><dd>definition</dd>"
"<dt>word</dt><dt>word</dt><dd>definition</dd><dd>definition</dd>"
"</dl>")
d = microdom.parseString(s, beExtremelyLenient=1)
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testUnicodeTolerance(self):
import struct
s = '<foo><bar><baz /></bar></foo>'
j =(u'<?xml version="1.0" encoding="UCS-2" ?>\r\n<JAPANESE>\r\n'
u'<TITLE>\u5c02\u9580\u5bb6\u30ea\u30b9\u30c8 </TITLE></JAPANESE>')
j2=('\xff\xfe<\x00?\x00x\x00m\x00l\x00 \x00v\x00e\x00r\x00s\x00i\x00o'
'\x00n\x00=\x00"\x001\x00.\x000\x00"\x00 \x00e\x00n\x00c\x00o\x00d'
'\x00i\x00n\x00g\x00=\x00"\x00U\x00C\x00S\x00-\x002\x00"\x00 \x00?'
'\x00>\x00\r\x00\n\x00<\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E'
'\x00>\x00\r\x00\n\x00<\x00T\x00I\x00T\x00L\x00E\x00>\x00\x02\\'
'\x80\x95\xb6[\xea0\xb90\xc80 \x00<\x00/\x00T\x00I\x00T\x00L\x00E'
'\x00>\x00<\x00/\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E\x00>\x00')
def reverseBytes(s):
fmt = str(len(s) // 2) + 'H'
return struct.pack('<' + fmt, *struct.unpack('>' + fmt, s))
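        # Editorial worked example (added): reverseBytes swaps the byte
        # order of each 16-bit unit, so the UTF-16BE encoding of u'ab'
        # ('\x00a\x00b') becomes 'a\x00b\x00', the UTF-16LE encoding; this
        # is how the test builds byte-swapped documents to reparse.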
urd = microdom.parseString(reverseBytes(s.encode('UTF-16')))
ud = microdom.parseString(s.encode('UTF-16'))
sd = microdom.parseString(s)
self.assertTrue(ud.isEqualToDocument(sd))
self.assertTrue(ud.isEqualToDocument(urd))
ud = microdom.parseString(j)
urd = microdom.parseString(reverseBytes(j2))
sd = microdom.parseString(j2)
self.assertTrue(ud.isEqualToDocument(sd))
self.assertTrue(ud.isEqualToDocument(urd))
# test that raw text still gets encoded
# test that comments get encoded
j3=microdom.parseString(u'<foo/>')
hdr='<?xml version="1.0"?>'
div=microdom.lmx().text(u'\u221a', raw=1).node
de=j3.documentElement
de.appendChild(div)
de.appendChild(j3.createComment(u'\u221a'))
self.assertEqual(j3.toxml(), hdr+
u'<foo><div>\u221a</div><!--\u221a--></foo>'.encode('utf8'))
def testNamedChildren(self):
tests = {"<foo><bar /><bar unf='1' /><bar>asdfadsf</bar>"
"<bam/></foo>" : 3,
'<foo>asdf</foo>' : 0,
'<foo><bar><bar></bar></bar></foo>' : 1,
}
for t in tests.keys():
node = microdom.parseString(t).documentElement
result = domhelpers.namedChildren(node, 'bar')
self.assertEqual(len(result), tests[t])
if result:
self.assertTrue(hasattr(result[0], 'tagName'))
def testCloneNode(self):
s = '<foo a="b"><bax>x</bax></foo>'
node = microdom.parseString(s).documentElement
clone = node.cloneNode(deep=1)
self.failIfEquals(node, clone)
self.assertEqual(len(node.childNodes), len(clone.childNodes))
c1, c2 = node.firstChild(), clone.firstChild()
self.failIfEquals(c1, c2)
self.assertEqual(len(c1.childNodes), len(c2.childNodes))
self.failIfEquals(c1.firstChild(), c2.firstChild())
self.assertEqual(s, clone.toxml())
self.assertEqual(node.namespace, clone.namespace)
def testCloneDocument(self):
s = ('<?xml version="1.0"?>'
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><foo></foo>')
node = microdom.parseString(s)
clone = node.cloneNode(deep=1)
self.failIfEquals(node, clone)
self.assertEqual(len(node.childNodes), len(clone.childNodes))
self.assertEqual(s, clone.toxml())
self.assertTrue(clone.isEqualToDocument(node))
self.assertTrue(node.isEqualToDocument(clone))
def testLMX(self):
n = microdom.Element("p")
lmx = microdom.lmx(n)
lmx.text("foo")
b = lmx.b(a="c")
b.foo()["z"] = "foo"
b.foo()
b.add("bar", c="y")
s = '<p>foo<b a="c"><foo z="foo"></foo><foo></foo><bar c="y"></bar></b></p>'
self.assertEqual(s, n.toxml())
def testDict(self):
"""
        L{Element} instances are hashable, despite their dictionary-like interface.
"""
n = microdom.Element("p")
hash(n)
def testEscaping(self):
# issue 590
raw = "&'some \"stuff\"', <what up?>"
cooked = "&'some "stuff"', <what up?>"
esc1 = microdom.escape(raw)
self.assertEqual(esc1, cooked)
self.assertEqual(microdom.unescape(esc1), raw)
def testNamespaces(self):
s = '''
<x xmlns="base">
<y />
<y q="1" x:q="2" y:q="3" />
<y:y xml:space="1">here is some space </y:y>
<y:y />
<x:y />
</x>
'''
d = microdom.parseString(s)
# at least make sure it doesn't traceback
s2 = d.toprettyxml()
self.assertEqual(d.documentElement.namespace,
"base")
self.assertEqual(d.documentElement.getElementsByTagName("y")[0].namespace,
"base")
self.assertEqual(
d.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'),
'1')
d2 = microdom.parseString(s2)
self.assertEqual(d2.documentElement.namespace,
"base")
self.assertEqual(d2.documentElement.getElementsByTagName("y")[0].namespace,
"base")
self.assertEqual(
d2.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'),
'1')
def testNamespaceDelete(self):
"""
Test that C{toxml} can support xml structures that remove namespaces.
"""
s1 = ('<?xml version="1.0"?><html xmlns="http://www.w3.org/TR/REC-html40">'
'<body xmlns=""></body></html>')
s2 = microdom.parseString(s1).toxml()
self.assertEqual(s1, s2)
def testNamespaceInheritance(self):
"""
        Check that an unspecified namespace is a thing separate from an
        undefined namespace. This test was added after discovering some
        weirdness in Lore.
"""
# will only work if childNodes is mutated. not sure why.
child = microdom.Element('ol')
parent = microdom.Element('div', namespace='http://www.w3.org/1999/xhtml')
parent.childNodes = [child]
self.assertEqual(parent.toxml(),
'<div xmlns="http://www.w3.org/1999/xhtml"><ol></ol></div>')
def test_prefixedTags(self):
"""
XML elements with a prefixed name as per upper level tag definition
have a start-tag of C{"<prefix:tag>"} and an end-tag of
C{"</prefix:tag>"}.
Refer to U{http://www.w3.org/TR/xml-names/#ns-using} for details.
"""
outerNamespace = "http://example.com/outer"
innerNamespace = "http://example.com/inner"
document = microdom.Document()
# Create the root in one namespace. Microdom will probably make this
# the default namespace.
root = document.createElement("root", namespace=outerNamespace)
# Give the root some prefixes to use.
root.addPrefixes({innerNamespace: "inner"})
# Append a child to the root from the namespace that prefix is bound
# to.
tag = document.createElement("tag", namespace=innerNamespace)
# Give that tag a child too. This way we test rendering of tags with
# children and without children.
child = document.createElement("child", namespace=innerNamespace)
tag.appendChild(child)
root.appendChild(tag)
document.appendChild(root)
# ok, the xml should appear like this
xmlOk = (
'<?xml version="1.0"?>'
'<root xmlns="http://example.com/outer" '
'xmlns:inner="http://example.com/inner">'
'<inner:tag><inner:child></inner:child></inner:tag>'
'</root>')
xmlOut = document.toxml()
self.assertEqual(xmlOut, xmlOk)
def test_prefixPropagation(self):
"""
Children of prefixed tags respect the default namespace at the point
where they are rendered. Specifically, they are not influenced by the
prefix of their parent as that prefix has no bearing on them.
See U{http://www.w3.org/TR/xml-names/#scoping} for details.
To further clarify the matter, the following::
<root xmlns="http://example.com/ns/test">
<mytag xmlns="http://example.com/ns/mytags">
<mysubtag xmlns="http://example.com/ns/mytags">
<element xmlns="http://example.com/ns/test"></element>
</mysubtag>
</mytag>
</root>
Should become this after all the namespace declarations have been
I{moved up}::
<root xmlns="http://example.com/ns/test"
xmlns:mytags="http://example.com/ns/mytags">
<mytags:mytag>
<mytags:mysubtag>
<element></element>
</mytags:mysubtag>
</mytags:mytag>
</root>
"""
outerNamespace = "http://example.com/outer"
innerNamespace = "http://example.com/inner"
document = microdom.Document()
# creates a root element
root = document.createElement("root", namespace=outerNamespace)
document.appendChild(root)
# Create a child with a specific namespace with a prefix bound to it.
root.addPrefixes({innerNamespace: "inner"})
mytag = document.createElement("mytag",namespace=innerNamespace)
root.appendChild(mytag)
# Create a child of that which has the outer namespace.
mysubtag = document.createElement("mysubtag", namespace=outerNamespace)
mytag.appendChild(mysubtag)
xmlOk = (
'<?xml version="1.0"?>'
'<root xmlns="http://example.com/outer" '
'xmlns:inner="http://example.com/inner">'
'<inner:mytag>'
'<mysubtag></mysubtag>'
'</inner:mytag>'
'</root>'
)
xmlOut = document.toxml()
self.assertEqual(xmlOut, xmlOk)
class BrokenHTMLTests(TestCase):
"""
Tests for when microdom encounters very bad HTML and C{beExtremelyLenient}
    is enabled. These tests are inspired by some HTML generated by a mailer,
which breaks up very long lines by splitting them with '!\n '. The expected
behaviour is loosely modelled on the way Firefox treats very bad HTML.
"""
def checkParsed(self, input, expected, beExtremelyLenient=1):
"""
Check that C{input}, when parsed, produces a DOM where the XML
of the document element is equal to C{expected}.
"""
output = microdom.parseString(input,
beExtremelyLenient=beExtremelyLenient)
self.assertEqual(output.documentElement.toxml(), expected)
def test_brokenAttributeName(self):
"""
Check that microdom does its best to handle broken attribute names.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><div al!\n ign="center">Foo</div></h1></body>'
expected = ('<body><h1><div al="True" ign="center">'
'Foo</div></h1></body>')
self.checkParsed(input, expected)
def test_brokenAttributeValue(self):
"""
        Check that microdom handles broken attribute values.
"""
input = '<body><h1><div align="cen!\n ter">Foo</div></h1></body>'
expected = '<body><h1><div align="cen!\n ter">Foo</div></h1></body>'
self.checkParsed(input, expected)
def test_brokenOpeningTag(self):
"""
Check that microdom does its best to handle broken opening tags.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><sp!\n an>Hello World!</span></h1></body>'
expected = '<body><h1><sp an="True">Hello World!</sp></h1></body>'
self.checkParsed(input, expected)
def test_brokenSelfClosingTag(self):
"""
        Check that microdom does its best to handle broken self-closing tags.
The important thing is that it doesn't raise an exception.
"""
self.checkParsed('<body><span /!\n></body>',
'<body><span></span></body>')
self.checkParsed('<span!\n />', '<span></span>')
def test_brokenClosingTag(self):
"""
Check that microdom does its best to handle broken closing tags.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><span>Hello World!</sp!\nan></h1></body>'
expected = '<body><h1><span>Hello World!</span></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!</!\nspan></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!</span!\n></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!<!\n/span></h1></body>'
expected = '<body><h1><span>Hello World!<!></!></span></h1></body>'
self.checkParsed(input, expected)
class NodeTests(TestCase):
"""
Tests for L{Node}.
"""
def test_isNodeEqualTo(self):
"""
L{Node.isEqualToNode} returns C{True} if and only if passed a L{Node}
with the same children.
"""
# A node is equal to itself
node = microdom.Node(object())
self.assertTrue(node.isEqualToNode(node))
another = microdom.Node(object())
# Two nodes with no children are equal
self.assertTrue(node.isEqualToNode(another))
node.appendChild(microdom.Node(object()))
# A node with no children is not equal to a node with a child
self.assertFalse(node.isEqualToNode(another))
another.appendChild(microdom.Node(object()))
# A node with a child and no grandchildren is equal to another node
# with a child and no grandchildren.
self.assertTrue(node.isEqualToNode(another))
# A node with a child and a grandchild is not equal to another node
# with a child and no grandchildren.
node.firstChild().appendChild(microdom.Node(object()))
self.assertFalse(node.isEqualToNode(another))
# A node with a child and a grandchild is equal to another node with a
# child and a grandchild.
another.firstChild().appendChild(microdom.Node(object()))
self.assertTrue(node.isEqualToNode(another))
def test_validChildInstance(self):
"""
Children of L{Node} instances must also be L{Node} instances.
"""
node = microdom.Node()
child = microdom.Node()
# Node.appendChild() only accepts Node instances.
node.appendChild(child)
self.assertRaises(TypeError, node.appendChild, None)
# Node.insertBefore() only accepts Node instances.
self.assertRaises(TypeError, node.insertBefore, child, None)
self.assertRaises(TypeError, node.insertBefore, None, child)
self.assertRaises(TypeError, node.insertBefore, None, None)
# Node.removeChild() only accepts Node instances.
node.removeChild(child)
self.assertRaises(TypeError, node.removeChild, None)
# Node.replaceChild() only accepts Node instances.
self.assertRaises(TypeError, node.replaceChild, child, None)
self.assertRaises(TypeError, node.replaceChild, None, child)
self.assertRaises(TypeError, node.replaceChild, None, None)
class DocumentTests(TestCase):
"""
Tests for L{Document}.
"""
doctype = 'foo PUBLIC "baz" "http://www.example.com/example.dtd"'
def test_isEqualToNode(self):
"""
L{Document.isEqualToNode} returns C{True} if and only if passed a
L{Document} with the same C{doctype} and C{documentElement}.
"""
# A document is equal to itself
document = microdom.Document()
self.assertTrue(document.isEqualToNode(document))
# A document without a doctype or documentElement is equal to another
# document without a doctype or documentElement.
another = microdom.Document()
self.assertTrue(document.isEqualToNode(another))
# A document with a doctype is not equal to a document without a
# doctype.
document.doctype = self.doctype
self.assertFalse(document.isEqualToNode(another))
# Two documents with the same doctype are equal
another.doctype = self.doctype
self.assertTrue(document.isEqualToNode(another))
# A document with a documentElement is not equal to a document without
# a documentElement
document.appendChild(microdom.Node(object()))
self.assertFalse(document.isEqualToNode(another))
# Two documents with equal documentElements are equal.
another.appendChild(microdom.Node(object()))
self.assertTrue(document.isEqualToNode(another))
# Two documents with documentElements which are not equal are not
# equal.
document.documentElement.appendChild(microdom.Node(object()))
self.assertFalse(document.isEqualToNode(another))
def test_childRestriction(self):
"""
L{Document.appendChild} raises L{ValueError} if the document already
has a child.
"""
document = microdom.Document()
child = microdom.Node()
another = microdom.Node()
document.appendChild(child)
self.assertRaises(ValueError, document.appendChild, another)
class EntityReferenceTests(TestCase):
"""
Tests for L{EntityReference}.
"""
def test_isEqualToNode(self):
"""
L{EntityReference.isEqualToNode} returns C{True} if and only if passed
a L{EntityReference} with the same C{eref}.
"""
self.assertTrue(
microdom.EntityReference('quot').isEqualToNode(
microdom.EntityReference('quot')))
self.assertFalse(
microdom.EntityReference('quot').isEqualToNode(
microdom.EntityReference('apos')))
class CharacterDataTests(TestCase):
"""
Tests for L{CharacterData}.
"""
def test_isEqualToNode(self):
"""
L{CharacterData.isEqualToNode} returns C{True} if and only if passed a
L{CharacterData} with the same value.
"""
self.assertTrue(
microdom.CharacterData('foo').isEqualToNode(
microdom.CharacterData('foo')))
self.assertFalse(
microdom.CharacterData('foo').isEqualToNode(
microdom.CharacterData('bar')))
class CommentTests(TestCase):
"""
Tests for L{Comment}.
"""
def test_isEqualToNode(self):
"""
L{Comment.isEqualToNode} returns C{True} if and only if passed a
L{Comment} with the same value.
"""
self.assertTrue(
microdom.Comment('foo').isEqualToNode(
microdom.Comment('foo')))
self.assertFalse(
microdom.Comment('foo').isEqualToNode(
microdom.Comment('bar')))
class TextTests(TestCase):
"""
Tests for L{Text}.
"""
def test_isEqualToNode(self):
"""
L{Text.isEqualToNode} returns C{True} if and only if passed a L{Text}
which represents the same data.
"""
self.assertTrue(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('foo', raw=True)))
self.assertFalse(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('foo', raw=False)))
self.assertFalse(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('bar', raw=True)))
class CDATASectionTests(TestCase):
"""
Tests for L{CDATASection}.
"""
def test_isEqualToNode(self):
"""
L{CDATASection.isEqualToNode} returns C{True} if and only if passed a
L{CDATASection} which represents the same data.
"""
self.assertTrue(
microdom.CDATASection('foo').isEqualToNode(
microdom.CDATASection('foo')))
self.assertFalse(
microdom.CDATASection('foo').isEqualToNode(
microdom.CDATASection('bar')))
class ElementTests(TestCase):
"""
Tests for L{Element}.
"""
def test_isEqualToNode(self):
"""
L{Element.isEqualToNode} returns C{True} if and only if passed a
L{Element} with the same C{nodeName}, C{namespace}, C{childNodes}, and
C{attributes}.
"""
self.assertTrue(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar')))
# Elements with different nodeName values do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'bar', {'a': 'b'}, object(), namespace='bar')))
# Elements with different namespaces do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='baz')))
# Elements with different childNodes do not compare equal.
one = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar')
two = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar')
two.appendChild(microdom.Node(object()))
self.assertFalse(one.isEqualToNode(two))
# Elements with different attributes do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'c'}, object(), namespace='bar')))
| mit | -7,277,566,797,823,867,000 | 37.696577 | 90 | 0.582845 | false |
CKehl/pylearn2 | pylearn2/models/tests/test_s3c_inference.py | 44 | 14386 | from __future__ import print_function
from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.models.s3c import E_Step
from pylearn2.utils import contains_nan
from theano import function
import numpy as np
from theano.compat.six.moves import xrange
import theano.tensor as T
from theano import config
#from pylearn2.utils import serial
def broadcast(mat, shape_0):
rval = mat
if mat.shape[0] != shape_0:
assert mat.shape[0] == 1
rval = np.zeros((shape_0, mat.shape[1]),dtype=mat.dtype)
for i in xrange(shape_0):
rval[i,:] = mat[0,:]
return rval
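# Editorial worked example (added): broadcast(m, 4) on a (1, 3) matrix m
# returns a (4, 3) array whose rows are all copies of m[0, :]; an input
# whose first dimension already equals shape_0 is returned unchanged.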
class Test_S3C_Inference:
def setUp(self):
# Temporarily change config.floatX to float64, as s3c inference
# tests currently fail due to numerical issues for float32.
self.prev_floatX = config.floatX
config.floatX = 'float64'
def tearDown(self):
# Restore previous value of floatX
config.floatX = self.prev_floatX
def __init__(self):
""" gets a small batch of data
sets up an S3C model
"""
# We also have to change the value of config.floatX in __init__.
self.prev_floatX = config.floatX
config.floatX = 'float64'
try:
self.tol = 1e-5
#dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')
#X = dataset.get_batch_design(1000)
#X = X[:,0:5]
X = np.random.RandomState([1,2,3]).randn(1000,5)
X -= X.mean()
X /= X.std()
m, D = X.shape
N = 5
#don't give the model an e_step or learning rate so it won't spend years compiling a learn_func
self.model = S3C(nvis = D,
nhid = N,
irange = .1,
init_bias_hid = 0.,
init_B = 3.,
min_B = 1e-8,
max_B = 1000.,
init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,
init_mu = 1., e_step = None,
m_step = Grad_M_Step(),
min_bias_hid = -1e30, max_bias_hid = 1e30,
)
self.model.make_pseudoparams()
self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]
self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)
self.e_step.register_model(self.model)
self.X = X
self.N = N
self.m = m
finally:
config.floatX = self.prev_floatX
def test_match_unrolled(self):
""" tests that inference with scan matches result using unrolled loops """
unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)
unrolled_e_step.register_model(self.model)
V = T.matrix()
scan_result = self.e_step.infer(V)
unrolled_result = unrolled_e_step.infer(V)
outputs = []
for key in scan_result:
outputs.append(scan_result[key])
outputs.append(unrolled_result[key])
f = function([V], outputs)
outputs = f(self.X)
assert len(outputs) % 2 == 0
for i in xrange(0,len(outputs),2):
assert np.allclose(outputs[i],outputs[i+1])
def test_grad_s(self):
"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
model.test_batch_size = X.shape[0]
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
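        # (Editorial note, added: up to terms constant in H and Mu1,
        # trunc_kl = E_q[E(v,h,s)] - H_q[h,s], which is KL(q || p) minus the
        # constant log partition function, so each coordinate update of s_i
        # should zero the corresponding gradient checked below.)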
grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)
grad_Mu1_idx = grad_Mu1[:,idx]
grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)
for i in xrange(self.N):
Mu1[:,i] = s_i_func(H, Mu1, i)
g = grad_func(H,Mu1,i)
assert not contains_nan(g)
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))
def test_value_s(self):
"tests that the value of the kl divergence decreases with each update to s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
Mu1[:,i] = s_i_func(H, Mu1, i)
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
mx = increase.max()
if mx > 1e-3:
raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))
def test_grad_h(self):
"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = new_H[:,idx]
updates_func = function([H_var,Mu1_var,idx], h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,
var_s1_hat = Sigma1)
grad_H = T.grad(trunc_kl.sum(), H_var)
assert len(grad_H.type.broadcastable) == 2
#from theano.printing import min_informative_str
#print min_informative_str(grad_H)
#grad_H = Print('grad_H')(grad_H)
#grad_H_idx = grad_H[:,idx]
grad_func = function([H_var, Mu1_var], grad_H)
failed = False
for i in xrange(self.N):
rval = updates_func(H, Mu1, i)
H[:,i] = rval
g = grad_func(H,Mu1)[:,i]
assert not contains_nan(g)
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
#print "new values of H"
#print H[:,i]
#print "gradient on new values of H"
#print g
failed = True
print('iteration ',i)
#print 'max value of new H: ',H[:,i].max()
#print 'H for failing g: '
failing_h = H[np.abs(g) > self.tol, i]
#print failing_h
#from matplotlib import pyplot as plt
#plt.scatter(H[:,i],g)
#plt.show()
#ignore failures extremely close to h=1
high_mask = failing_h > .001
low_mask = failing_h < .999
mask = high_mask * low_mask
print('masked failures: ',mask.shape[0],' err ',g_abs_max)
if mask.sum() > 0:
print('failing h passing the range mask')
print(failing_h[ mask.astype(bool) ])
raise Exception('after mean field step, gradient of kl divergence'
' wrt freshly updated variational parameter should be 0, '
'but here the max magnitude of a gradient element is '
+str(g_abs_max)+' after updating h_'+str(i))
#assert not failed
def test_value_h(self):
"tests that the value of the kl divergence decreases with each update to h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = newH[:,idx]
h_i_func = function([H_var,Mu1_var,idx],h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
        # by "truncated KL", I mean that I am dropping terms that don't depend on H and Mu1
        # (they don't affect the outcome of this test, and some of them are intractable)
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
H[:,i] = h_i_func(H, Mu1, i)
#we don't update mu, the whole point of the split e step is we don't have to
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
print('failures after iteration ',i,': ',(increase > self.tol).sum())
mx = increase.max()
if mx > 1e-4:
print('increase amounts of failing examples:')
print(increase[increase > self.tol])
print('failing H:')
print(H[increase > self.tol,:])
print('failing Mu1:')
print(Mu1[increase > self.tol,:])
print('failing V:')
print(X[increase > self.tol,:])
raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))
if __name__ == '__main__':
obj = Test_S3C_Inference()
#obj.test_grad_h()
#obj.test_grad_s()
#obj.test_value_s()
obj.test_value_h()
| bsd-3-clause | -8,535,140,528,641,407,000 | 30.898004 | 217 | 0.536424 | false |
dvklopfenstein/PrincetonAlgorithms | tests/test_Selection.py | 1 | 2392 | #!/usr/bin/env python
import sys
from AlgsSedgewickWayne.Selection import Sort
from AlgsSedgewickWayne.testcode.ArrayHistory import chk
from AlgsSedgewickWayne.testcode.ArrayHistory import ArrayHistory
from AlgsSedgewickWayne.testcode.InputArgs import cli_get_array
def test_wk2_lec(prt=sys.stdout):
"""Example from week 2 lecture, "Selection Sort (6:59)" """
# Give the array that results after the first 4 exchanges when
# selection sorting the following array:
a = map(int, "7 10 5 3 8 4 2 9 6".split() )
run(a, 'SELECTION SORT', prt=prt)
def test_wk2_ex_Selections_489125(prt=sys.stdout):
# (seed = 183182)
# Give the array that results after the first 4 exchanges when
# selection sorting the following array:
a = map(int, "13 16 40 60 19 70 71 47 12 67".split() )
run(a, 'SELECTION SORT', prt=prt)
def test_wk2_q3a(prt=sys.stdout):
# QUESTION: Any pair of items is compared no more than once during selection sort.
# ANSWER(FALSE): Consider the array { 2, 1, 0 }. Then, 2 and 1 are compared twice.
run([2, 1, 0], 'SELECTION SORT', prt=prt)
def test_wk2_q3b(prt=sys.stdout):
# QUESTION: An exchange in selection sort can decrease the number of inversions
# by two (or more).
# ANSWER(TRUE): Consider the array { 3, 2, 1 }, which has 3 inversions. The first exchange results in the array { 1, 2, 3 }, which has zero inversions.
run([3, 2, 1], 'SELECTION SORT', prt=prt)
def test_wk2_q2a(prt=sys.stdout):
desc = 'SELECTION SORT WORDS'
prt.write("\n{TEST}\n".format(TEST=desc))
exp = "BECK BUSH DEVO EVE6 HOLE JAYZ KORN MIMS VAIN RATT TOTO PINK SADE NOFX SOAD WHAM"
a = "HOLE BUSH MIMS BECK WHAM SOAD NOFX TOTO VAIN RATT DEVO PINK SADE KORN JAYZ EVE6".split()
ah = ArrayHistory()
Sort(a, array_history=ah)
ah.show(desc)
for idx, A in enumerate(ah):
if chk( A[0], exp ):
prt.write("MATCH {I}\n".format(I=idx))
def run(a, desc=None, prt=sys.stdout):
ah = ArrayHistory()
Sort(a, array_history=ah)
if desc is None:
desc = "INSERTION SORT"
prt.write("{DESC} RESULT {A}\n".format(DESC=desc, A=' '.join(str(e) for e in a)))
ah.prt()
ah.show(desc)
def run_all():
"""Run all tests."""
test_wk2_lec()
test_wk2_ex_Selections_489125()
test_wk2_q3a()
  test_wk2_q3b()
  test_wk2_q2a()
def cli():
N = len(sys.argv)
if N == 1:
run_all()
elif N == 2:
run(cli_get_array())
if __name__ == '__main__':
cli()
| gpl-2.0 | -1,045,273,861,957,242,400 | 31.767123 | 153 | 0.674331 | false |
svm-zhang/poolseq_tk | poolseq_tk.py | 1 | 12829 | import os
import sys
import argparse
import collections
import multiprocessing as mp
import glob
import subprocess
import shlex
import re
import sz_collapse
import sz_acount
import sz_mergeAC
import sz_filter
import sz_fisher
import sz_cmh
import sz_plotting
import sz_overlap
import sz_prepVCF
import sz_view
import sz_biallelic
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _format_action(self, action):
flag = 0
parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
if action.nargs == argparse.PARSER:
sub_cmd = "\n"
for i, part in enumerate(parts.split("\n")):
if i == 0:
continue
else:
if flag == 1:
sub_cmd += 4*" "+ " ".join(filter(None, part.split(" "))) + "\n"
flag = 0
continue
if len(part.split(" ")) > 4:
if len(part.split(" ")[4]) > 7:
sub_cmd += " ".join(part.split(" ")[0:5])
flag = 1
else:
sub_cmd += " ".join(part.split(" ")[0:5]) + (9-len(part.split(" ")[4])+4)*" " + " ".join(filter(None, part.split(" "))) + "\n"
return sub_cmd
else:
return parts
def getopts():
parser = argparse.ArgumentParser(description="Toolkits for Genome-wide Association Mapping using Pooled Sequencing")
sub_parsers = parser.add_subparsers(title="Commands", metavar="", dest="command")
usage = "Viewing mpileup file (transforming 5th column in mpileup into human readable letters)"
view_parser = sub_parsers.add_parser("view", help=usage)
view_parser.add_argument("-mpileup",
metavar="FILE",
dest="ipileup",
required=True,
help="mpileup file")
view_parser.add_argument("-snp",
metavar="FILE",
dest="isnp",
required=True,
help="tab-delimited snp file with four columns: chr, pos, ref, alt")
view_parser.add_argument("-o",
metavar="FILE",
dest="out",
help="tab-delimited file with five columns: chr, pos, ref, alt, transformed reads bases ")
view_parser.set_defaults(func=sz_view.run_view)
# Collapsing two mpileup files
usage = "Collapsing two pileup files at corresponding SNPs"
collapse_parser = sub_parsers.add_parser("collapse", help=usage)
collapse_parser.add_argument("-m1",
metavar="FILE",
dest="m1",
required="True",
help="one of the two mpileup files")
collapse_parser.add_argument("-m2",
metavar="FILE",
dest="m2",
required="True",
help="one of the two mpileup files")
collapse_parser.add_argument("-snps",
metavar="FILE",
dest="snps",
required="True",
help="a list of SNP positions. e.g. chr\\tpos")
collapse_parser.add_argument("-offset1",
metavar="INT",
dest="offset1",
type=int,
default=0,
help="offset add in for the first mpileup file specified by -m1")
collapse_parser.add_argument("-offset2",
metavar="INT",
dest="offset2",
type=int,
default=0,
help="offset add in for the second mpileup file specified by -m2")
collapse_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file. Default: STDOUT")
collapse_parser.set_defaults(func=sz_collapse.run_collapse)
usage = "Counting number of alleles given number of pileup files"
count_parser = sub_parsers.add_parser("count", help=usage)
count_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file of allele counts at each SNP. Default: STDOUT")
count_parser.add_argument("-pos",
metavar="FILE",
dest="pos",
help="file of SNPs where counting will happen")
count_parser.add_argument("pileups",
metavar="PILEUP",
nargs='+',
help="pileup files")
count_parser.set_defaults(func=sz_acount.run_count)
usage = "Getting Biallelic sites only"
biallelic_parser = sub_parsers.add_parser("biallelic", help=usage)
biallelic_parser.add_argument("-o",
metavar="FILE",
dest="out",
required=True,
help="output file of biallelic sites")
biallelic_parser.add_argument("pileups",
metavar="PILEUP",
nargs='+',
help="pileup files")
biallelic_parser.set_defaults(func=sz_biallelic.run_biallelic)
usage = "Filter SNPs that are not satisfied specified conditions"
filter_parser = sub_parsers.add_parser("filter", help=usage)
filter_parser.add_argument("-ac",
metavar="FILE",
dest="ac_file",
required=True,
help="allele counts file")
filter_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file without filtered SNPs. Default: STDOUT")
filter_parser.add_argument("-min_ref_ac",
metavar="INT",
dest="min_ref_ac",
type=int,
default=5,
help="minimum number of the ref allele (3rd column) per sample/pool")
filter_parser.add_argument("-min_alt_ac",
metavar="INT",
dest="min_alt_ac",
type=int,
default=5,
help="minimum number of the alt allele (4th column) per sample/pool")
filter_parser.add_argument("-min_cov",
metavar="INT",
dest="min_cov",
type=int,
default=10,
help="specify minimum coverage per site per sample/pool")
filter_parser.set_defaults(func=sz_filter.run_filter)
usage = "Merging allele counts from multiple replicates"
mergeAC_parser = sub_parsers.add_parser("mergeAC", help=usage)
mergeAC_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file of combined counts at each SNP across replicates")
mergeAC_parser.add_argument("acs",
metavar="ac_file",
nargs='+',
help="allele counts files")
mergeAC_parser.set_defaults(func=sz_mergeAC.run_merge)
usage = "Run Fisher's Exact Test at each SNP"
fisher_parser = sub_parsers.add_parser("fisher", help=usage)
fisher_parser.add_argument("-ac",
metavar="FILE",
dest="ac_file",
help="allele counts for one pool")
fisher_parser.add_argument("-outp",
metavar="PREFIX",
dest="outp",
default="poolseq_tk.fisher",
help="output file for Fisher's Exact tests")
fisher_parser.add_argument("-t",
metavar="INT",
dest="nproc",
type=int,
default=1,
help="Specify number of processes running simultaneously")
fisher_parser.add_argument("-adj_cutoff",
metavar="FLOAT",
dest="adj_cutoff",
type=float,
default=0.05,
help="specify the cutoff below which adjusted p-values will be considered as significant")
fisher_parser.add_argument("-adj_method",
metavar="STR",
dest="adj_method",
default="fdr",
# choices=["holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"],
help="specify the adjustment methods. Only BH procedure supported")
fisher_parser.add_argument("-direction",
metavar="STR",
dest="oddsr_direction",
choices=["greater", "less"],
required=True,
help="specify whether odds ration greater, or less, than 1")
fisher_parser.set_defaults(func=sz_fisher.run_fisher)
usage="run Cochran-Mantel-Haenszel test with multi-testing adjustment"
cmh_parser = sub_parsers.add_parser("cmh", help=usage)
cmh_parser.add_argument("-ac",
metavar="FILE",
dest="table_file",
required=True,
help="output file with the table that CMH test run on")
cmh_parser.add_argument("-outp",
metavar="PREFIX",
dest="outp",
default="poolseq_tk.cmh",
required=True, help="output file with CMH test results")
cmh_parser.add_argument("-t",
metavar="INT",
dest="nproc",
type=int,
default=1,
help="Specify number of processes running simultaneously")
cmh_parser.add_argument("-adj_cutoff",
metavar="FLOAT",
dest="adj_cutoff",
type=float,
default=0.05,
help="specify the cutoff below which adjusted p-values will be considered as significant")
cmh_parser.add_argument("-adj_method",
metavar="STR",
dest="adj_method",
default="BH",
choices=["holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"],
help="specify the adjustment methods")
cmh_parser.add_argument("-direction",
metavar="STR",
dest="oddsr_direction",
choices=["greater", "less"],
required=True,
help="specify whether odds ration greater, or less, than 1")
cmh_parser.set_defaults(func=sz_cmh.run_cmh)
usage = "Making Manhattan plot and QQ plot"
plot_parser = sub_parsers.add_parser("plot", help=usage)
plot_parser.add_argument("-i",
metavar="FILE",
dest="input",
required=True,
help="input file of test results with all SNPs")
plot_parser.add_argument("-highlight",
metavar="FILE",
dest="highlight_snps",
help="file of a list of SNPs to be highlighted in the Manhattan plot")
plot_parser.add_argument("-outp",
metavar="PREFIX",
dest="outp",
help="prefix of output file")
plot_parser.add_argument("-pcutoff",
metavar="FLOAT",
dest="pcutoff",
type=float,
help="specify the p value cutoff to draw on the Mahhatan plot")
plot_parser.add_argument("-fdrlevel",
metavar="FLOAT",
dest="fdrlevel",
type=float,
default=0.05,
help="specify at which level FDR will be applied")
plot_parser.add_argument("-qqtitle",
metavar="STR",
dest="qqtitle",
help="specify the title for QQ plot")
plot_parser.add_argument("-manx",
metavar="STR",
dest="manx",
help="specify xlab for manhattan plot")
plot_parser.add_argument("-manxlim",
metavar="STR",
dest="manxlim",
default="-",
help="an interval defined by min and max, sperated by comma, e.g. 19,27. Default=\"-\"")
plot_parser.add_argument("-mantitle",
metavar="STR",
dest="mantitle",
help="specify the title for Manhattan plot")
plot_mutual_group = plot_parser.add_mutually_exclusive_group(required=True)
plot_mutual_group.add_argument("-pdf",
dest="pdf",
action="store_true",
help="output qqplot in pdf format")
plot_mutual_group.add_argument("-png",
dest="png",
action="store_true",
help="output qqplot in pdf format. Probably not working!")
plot_parser.set_defaults(func=sz_plotting.making_plot)
usage = "Preparing VCF file from tests result file for snpEff"
prepVCF_parser = sub_parsers.add_parser("vcf", help=usage)
prepVCF_parser.add_argument("-i",
metavar="FILE",
dest="infile",
required="True",
help="test result file generated from poolseq_tk.py fisher or poolseq_tk.py cmh")
prepVCF_parser.add_argument("-o",
metavar="FILE",
dest="out",
help="output in VCF format.")
prepVCF_parser.add_argument("-samples",
metavar="LIST",
dest="samples",
default="table1,table2,table3,table4",
help="a list of sample names separated by comma")
prepVCF_parser.add_argument("-filter",
metavar="EXPR",
nargs='*',
dest="filters",
default=list(),
help="a set of filters to apply. Only support INFO field ratio, e.g. ratio>1")
prepVCF_parser.add_argument("-fst",
metavar="FILE",
dest="ifst",
help="a file of Fst values")
prepVCF_parser.set_defaults(func=sz_prepVCF.run_prepVCF)
adjust_parser = sub_parsers.add_parser("adjust", help="getting significant SNPs with FDR correction")
diff_parser = sub_parsers.add_parser("diff", help="get SNPs that significant in one replicate but not in the other")
usage = "Get overlaps of significant SNPs between replicates/pools"
overlap_parser = sub_parsers.add_parser("overlap", help=usage)
overlap_parser.add_argument("-a",
metavar="FILE",
dest="file_a",
help="significant SNPs identified from pool A")
overlap_parser.add_argument("-b",
metavar="FILE",
dest="file_b",
help="significant SNPs identified from pool B")
overlap_parser.add_argument("-o",
metavar="FILE",
dest="out",
help="output file of overlapion of significant SNPs identified from both pools")
overlap_parser.set_defaults(func=sz_overlap.run_overlap)
# adjust_parser.set_defaults(func=multi_testing_correction)
# diff_parser.set_defaults(func=call_diff)
return parser.parse_args()
def main():
args = getopts()
args.func(args)
if __name__ == "__main__":
main()
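# Illustrative invocations (ours; the file names below are hypothetical). Each
# subcommand dispatches to its module's run_* function via set_defaults(func=...):
#
#   python poolseq_tk.py view -mpileup sample.mpileup -snp snps.txt -o view.txt
#   python poolseq_tk.py count -pos snps.txt pool1.pileup pool2.pileup > counts.txt
#   python poolseq_tk.py filter -ac counts.txt -min_cov 10 -o counts.filtered.txt
#   python poolseq_tk.py fisher -ac counts.filtered.txt -outp out.fisher -direction greater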
| gpl-2.0 | 6,752,140,550,192,854,000 | 33.956403 | 133 | 0.628342 | false |
scottcunningham/ansible | lib/ansible/plugins/action/pause.py | 57 | 5479 | # Copyright 2012, Tim Bielawa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import sys
import time
from termios import tcflush, TCIFLUSH
from ansible.errors import *
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=dict()):
''' run the pause action module '''
duration_unit = 'minutes'
prompt = None
seconds = None
result = dict(
changed = False,
rc = 0,
stderr = '',
stdout = '',
start = None,
stop = None,
delta = None,
)
# FIXME: not sure if we can get this info directly like this anymore?
#hosts = ', '.join(self.runner.host_set)
# Is 'args' empty, then this is the default prompted pause
if self._task.args is None or len(self._task.args.keys()) == 0:
pause_type = 'prompt'
#prompt = "[%s]\nPress enter to continue:\n" % hosts
prompt = "[%s]\nPress enter to continue:\n" % self._task.get_name().strip()
# Are 'minutes' or 'seconds' keys that exist in 'args'?
elif 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
pause_type = 'minutes'
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
pause_type = 'seconds'
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError as e:
return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
elif 'prompt' in self._task.args:
pause_type = 'prompt'
#prompt = "[%s]\n%s:\n" % (hosts, self._task.args['prompt'])
prompt = "[%s]\n%s:\n" % (self._task.get_name().strip(), self._task.args['prompt'])
# I have no idea what you're trying to do. But it's so wrong.
else:
return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))
#vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
# (self.pause_type, self.duration_unit, self.seconds, self.prompt))
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = str(datetime.datetime.now())
# FIXME: this is all very broken right now, as prompting from the worker side
# is not really going to be supported, and actions marked as BYPASS_HOST_LOOP
# probably should not be run through the executor engine at all. Also, ctrl+c
# is now captured on the parent thread, so it can't be caught here via the
# KeyboardInterrupt exception.
try:
if not pause_type == 'prompt':
print("(^C-c = continue early, ^C-a = abort)")
#print("[%s]\nPausing for %s seconds" % (hosts, seconds))
print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds))
time.sleep(seconds)
else:
# Clear out any unflushed buffered input which would
# otherwise be consumed by raw_input() prematurely.
#tcflush(sys.stdin, TCIFLUSH)
result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding))
except KeyboardInterrupt:
while True:
print('\nAction? (a)bort/(c)ontinue: ')
c = getch()
if c == 'c':
# continue playbook evaluation
break
elif c == 'a':
# abort further playbook evaluation
                    raise AnsibleError('user requested abort!')
finally:
duration = time.time() - start
result['stop'] = str(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
return result
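# The interactive abort path above calls getch(), which is neither defined nor
# imported anywhere in this module. A minimal POSIX-only sketch is provided
# below (our assumption -- it relies on tty/termios and will not work on
# Windows):
import tty
import termios

def getch():
    """Read one character from stdin without echo or waiting for a newline."""
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        return sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)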
| gpl-3.0 | -6,424,252,642,390,632,000 | 39.286765 | 118 | 0.556488 | false |
SnakeJenny/TensorFlow | tensorflow/python/ops/init_ops.py | 23 | 19144 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None, partition_info=None):
raise NotImplementedError
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return constant_op.constant(False if dtype is dtypes.bool else 0,
dtype=dtype, shape=shape)
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return constant_op.constant(1, dtype=dtype, shape=shape)
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` following the desired `shape` of the
new tensor (see examples below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the desired shape of the
tensor. In the case where the total number of elements in `value` is less
than the number of elements required by the tensor shape, the last element
in `value` will be used to fill the remaining entries. If the total number of
elements in `value` is greater than the number of elements required by the
tensor shape, the initializer will raise a `ValueError`.
Args:
value: A Python scalar, list of values, or a N-dimensional numpy array. All
elements of the initialized variable will be set to the corresponding
value in the `value` argument.
dtype: The data type.
verify_shape: Boolean that enables verification of the shape of `value`. If
`True`, the initializer will throw an error if the shape of `value` is not
compatible with the shape of the initialized tensor.
Examples:
The following example can be rewritten using a numpy.ndarray instead
of the `value` list, even reshaped, as shown in the two commented lines
below the `value` list initialization.
```python
>>> import numpy as np
>>> import tensorflow as tf
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> # value = np.array(value)
>>> # value = value.reshape([2, 4])
>>> init = tf.constant_initializer(value)
>>> print('fitting shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
fitting shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
>>> print('larger shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
larger shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 7. 7. 7. 7.]]
>>> print('smaller shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 3], initializer=init)
ValueError: Too many elements provided. Needed at most 6, but received 8
>>> print('shape verification:')
>>> init_verify = tf.constant_initializer(value, verify_shape=True)
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init_verify)
TypeError: Expected Tensor's shape: (3, 4), got (8,).
```
"""
def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
self.value = value
self.dtype = dtype
self.verify_shape = verify_shape
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return constant_op.constant(self.value, dtype=dtype, shape=shape,
verify_shape=self.verify_shape)
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type.
"""
def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
self.minval = minval
self.maxval = maxval
self.seed = seed
self.dtype = dtype
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.random_uniform(shape, self.minval, self.maxval,
dtype, seed=self.seed)
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.random_normal(shape, self.mean, self.stddev,
dtype, seed=self.seed)
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.truncated_normal(shape, self.mean, self.stddev,
dtype, seed=self.seed)
class UniformUnitScaling(Initializer):
"""Initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
self.factor = factor
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
max_val = math.sqrt(3 / input_size) * self.factor
return random_ops.random_uniform(shape, -max_val, max_val,
dtype, seed=self.seed)
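# Illustrative sketch (ours, not part of the original API): for a linear layer
# with 256 inputs and factor=1.0, UniformUnitScaling above samples from
# [-sqrt(3/256), sqrt(3/256)] ~= [-0.108, 0.108], which roughly preserves the
# variance of x through x * W.
def _uniform_unit_scaling_bound_example(input_size=256.0, factor=1.0):
  """Recomputes the sampling bound used by UniformUnitScaling, for reference."""
  return math.sqrt(3 / input_size) * factor  # ~0.108 for the defaults above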
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Arguments:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Raises:
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self, scale=1.0,
mode="fan_in",
distribution="normal",
seed=None,
dtype=dtypes.float32):
if scale <= 0.:
raise ValueError("`scale` must be positive float.")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Invalid `mode` argument:", mode)
distribution = distribution.lower()
if distribution not in {"normal", "uniform"}:
raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale = self.scale
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
fan_in, fan_out = _compute_fans(scale_shape)
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "normal":
stddev = math.sqrt(scale)
return random_ops.truncated_normal(shape, 0.0, stddev,
dtype, seed=self.seed)
else:
limit = math.sqrt(3.0 * scale)
return random_ops.random_uniform(shape, -limit, limit,
dtype, seed=self.seed)
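# Illustrative sketch (ours, not part of the original API): how the three
# modes map a weight shape to the truncated-normal stddev. For a [64, 128]
# matrix with scale=2.0 (He-style) and mode="fan_in", stddev = sqrt(2/64) ~= 0.177.
def _variance_scaling_stddev_example(shape=(64, 128), scale=2.0, mode="fan_in"):
  """Recomputes the stddev that VarianceScaling would use for `shape`."""
  fan_in, fan_out = _compute_fans(shape)  # defined in the utilities below
  if mode == "fan_in":
    n = max(1., fan_in)
  elif mode == "fan_out":
    n = max(1., fan_out)
  else:  # "fan_avg"
    n = max(1., (fan_in + fan_out) / 2.)
  return math.sqrt(scale / n)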
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
  If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the singular value decomposition of a
matrix of uniform random numbers.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Args:
gain: multiplicative factor to apply to the orthogonal matrix
dtype: The type of the output.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
"""
def __init__(self, gain=1.0, dtype=dtypes.float32, seed=None):
self.gain = gain
self.dtype = _assert_float_dtype(dtype)
self.seed = seed
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_rows, num_cols)
# Generate a random matrix
a = random_ops.random_uniform(flat_shape, dtype=dtype, seed=self.seed)
# Compute the svd
_, u, v = linalg_ops.svd(a, full_matrices=False)
# Pick the appropriate singular value decomposition
if num_rows > num_cols:
q = u
else:
# Tensorflow departs from numpy conventions
# such that we need to transpose axes here
q = array_ops.transpose(v)
return self.gain * array_ops.reshape(q, shape)
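# Illustrative check (ours): since the rows/columns produced above are
# orthonormal up to `gain`, a square sample q of shape [n, n] should satisfy
# matmul(q, q, transpose_a=True) ~= gain**2 * identity(n). For non-square
# shapes the orthonormality holds along the smaller dimension only.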
# Aliases.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
orthogonal_initializer = Orthogonal
# pylint: enable=invalid-name
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=1.0,
mode="fan_avg",
distribution="uniform",
seed=seed,
dtype=dtype)
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=1.0,
mode="fan_avg",
distribution="normal",
seed=seed,
dtype=dtype)
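# Usage sketch (ours; the variable names are hypothetical): either factory
# plugs into variable creation like any other initializer, e.g.
#
#   init = glorot_uniform_initializer(seed=42)
#   w = tf.get_variable("w", shape=[784, 256], initializer=init)
#
# For that shape, limit = sqrt(6 / (784 + 256)) ~= 0.076.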
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Arguments:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
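# Worked example (ours): for a 3x3 convolution kernel mapping 16 input
# channels to 32 output channels, shape = (3, 3, 16, 32), so
# receptive_field_size = 3 * 3 = 9, fan_in = 16 * 9 = 144 and
# fan_out = 32 * 9 = 288.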
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
| apache-2.0 | 4,785,631,063,474,566,000 | 34.517625 | 80 | 0.661356 | false |
Permutatrix/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/config.py | 166 | 45047 | """ command line options, ini-file and conftest.py processing. """
import argparse
import shlex
import traceback
import types
import warnings
import py
# DON't import pytest here because it causes import cycle troubles
import sys, os
import _pytest._code
import _pytest.hookspec # the extension point definitions
from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")
# pytest startup
#
class ConftestImportFailure(Exception):
def __init__(self, path, excinfo):
Exception.__init__(self, path, excinfo)
self.path = path
self.excinfo = excinfo
def main(args=None, plugins=None):
""" return exit code, after performing an in-process test run.
:arg args: list of command line arguments.
:arg plugins: list of plugin objects to be auto-registered during
initialization.
"""
try:
try:
config = _prepareconfig(args, plugins)
except ConftestImportFailure as e:
tw = py.io.TerminalWriter(sys.stderr)
for line in traceback.format_exception(*e.excinfo):
tw.line(line.rstrip(), red=True)
tw.line("ERROR: could not load %s\n" % (e.path), red=True)
return 4
else:
try:
config.pluginmanager.check_pending()
return config.hook.pytest_cmdline_main(config=config)
finally:
config._ensure_unconfigure()
except UsageError as e:
for msg in e.args:
sys.stderr.write("ERROR: %s\n" %(msg,))
return 4
class cmdline: # compatibility namespace
main = staticmethod(main)
class UsageError(Exception):
""" error in pytest usage or invocation"""
_preinit = []
default_plugins = (
"mark main terminal runner python pdb unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
"junitxml resultlog doctest cacheprovider").split()
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
def _preloadplugins():
assert not _preinit
_preinit.append(get_config())
def get_config():
if _preinit:
return _preinit.pop(0)
# subsequent calls to main will create a fresh instance
pluginmanager = PytestPluginManager()
config = Config(pluginmanager)
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return config
def get_plugin_manager():
"""
Obtain a new instance of the
:py:class:`_pytest.config.PytestPluginManager`, with default plugins
already loaded.
This function can be used by integration with other tools, like hooking
into pytest to run tests into an IDE.
"""
return get_config().pluginmanager
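# A minimal integration sketch (ours, with a hypothetical plugin object):
#
#     pm = get_plugin_manager()
#     pm.register(MyIdePlugin())
#     # ... then drive a run the same way _prepareconfig() below does, via
#     # pm.hook.pytest_cmdline_parse(pluginmanager=pm, args=args)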
def _prepareconfig(args=None, plugins=None):
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args)
config = get_config()
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, py.builtin._basestring):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args)
except BaseException:
config._ensure_unconfigure()
raise
class PytestPluginManager(PluginManager):
"""
Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
functionality:
* loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and
``pytest_plugins`` global variables found in plugins being loaded;
* ``conftest.py`` loading during start-up;
"""
def __init__(self):
super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
self._conftest_plugins = set()
# state related to local conftest plugins
self._path2confmods = {}
self._conftestpath2mod = {}
self._confcutdir = None
self._noconftest = False
self.add_hookspecs(_pytest.hookspec)
self.register(self)
if os.environ.get('PYTEST_DEBUG'):
err = sys.stderr
encoding = getattr(err, 'encoding', 'utf8')
try:
err = py.io.dupfile(err, encoding=encoding)
except Exception:
pass
self.trace.root.setwriter(err.write)
self.enable_tracing()
def addhooks(self, module_or_class):
"""
.. deprecated:: 2.8
Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
"""
warning = dict(code="I2",
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
nodeid=None,
message="use pluginmanager.add_hookspecs instead of "
"deprecated addhooks() method.")
self._warn(warning)
return self.add_hookspecs(module_or_class)
def parse_hookimpl_opts(self, plugin, name):
# pytest hooks are always prefixed with pytest_
# so we avoid accessing possibly non-readable attributes
# (see issue #1073)
if not name.startswith("pytest_"):
return
# ignore some historic special names which can not be hooks anyway
if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
return
method = getattr(plugin, name)
opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
if opts is not None:
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
opts.setdefault(name, hasattr(method, name))
return opts
def parse_hookspec_opts(self, module_or_class, name):
opts = super(PytestPluginManager, self).parse_hookspec_opts(
module_or_class, name)
if opts is None:
method = getattr(module_or_class, name)
if name.startswith("pytest_"):
opts = {"firstresult": hasattr(method, "firstresult"),
"historic": hasattr(method, "historic")}
return opts
def _verify_hook(self, hook, hookmethod):
super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
if "__multicall__" in hookmethod.argnames:
fslineno = _pytest._code.getfslineno(hookmethod.function)
warning = dict(code="I1",
fslocation=fslineno,
nodeid=None,
message="%r hook uses deprecated __multicall__ "
"argument" % (hook.name))
self._warn(warning)
def register(self, plugin, name=None):
ret = super(PytestPluginManager, self).register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self))
return ret
def getplugin(self, name):
# support deprecated naming because plugins (xdist e.g.) use it
return self.get_plugin(name)
def hasplugin(self, name):
"""Return True if the plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config):
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers
config.addinivalue_line("markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.")
config.addinivalue_line("markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.")
def _warn(self, message):
kwargs = message if isinstance(message, dict) else {
'code': 'I1',
'message': message,
'fslocation': None,
'nodeid': None,
}
self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
#
# internal API for local conftest plugin handling
#
def _set_initial_conftests(self, namespace):
""" load initial conftest files given a preparsed "namespace".
As conftest files may add their own command line options
which have arguments ('--my-opt somepath') we might get some
false positives. All builtin and 3rd party plugins will have
been loaded, however, so common options will not confuse our logic
here.
"""
current = py.path.local()
self._confcutdir = current.join(namespace.confcutdir, abs=True) \
if namespace.confcutdir else None
self._noconftest = namespace.noconftest
testpaths = namespace.file_or_dir
foundanchor = False
for path in testpaths:
path = str(path)
# remove node-id syntax
i = path.find("::")
if i != -1:
path = path[:i]
anchor = current.join(path, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
self._getconftestmodules(anchor)
# let's also consider test* subdirs
if anchor.check(dir=1):
for x in anchor.listdir("test*"):
if x.check(dir=1):
self._getconftestmodules(x)
def _getconftestmodules(self, path):
if self._noconftest:
return []
try:
return self._path2confmods[path]
except KeyError:
if path.isfile():
clist = self._getconftestmodules(path.dirpath())
else:
# XXX these days we may rather want to use config.rootdir
# and allow users to opt into looking into the rootdir parent
# directories instead of requiring to specify confcutdir
clist = []
for parent in path.parts():
if self._confcutdir and self._confcutdir.relto(parent):
continue
conftestpath = parent.join("conftest.py")
if conftestpath.isfile():
mod = self._importconftest(conftestpath)
clist.append(mod)
self._path2confmods[path] = clist
return clist
def _rget_with_confmod(self, name, path):
modules = self._getconftestmodules(path)
for mod in reversed(modules):
try:
return mod, getattr(mod, name)
except AttributeError:
continue
raise KeyError(name)
def _importconftest(self, conftestpath):
try:
return self._conftestpath2mod[conftestpath]
except KeyError:
pkgpath = conftestpath.pypkgpath()
if pkgpath is None:
_ensure_removed_sysmodule(conftestpath.purebasename)
try:
mod = conftestpath.pyimport()
except Exception:
raise ConftestImportFailure(conftestpath, sys.exc_info())
self._conftest_plugins.add(mod)
self._conftestpath2mod[conftestpath] = mod
dirpath = conftestpath.dirpath()
if dirpath in self._path2confmods:
for path, mods in self._path2confmods.items():
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
self.trace("loaded conftestmodule %r" %(mod))
self.consider_conftest(mod)
return mod
#
# API for bootstrapping plugin loading
#
#
def consider_preparse(self, args):
for opt1,opt2 in zip(args, args[1:]):
if opt1 == "-p":
self.consider_pluginarg(opt2)
def consider_pluginarg(self, arg):
if arg.startswith("no:"):
name = arg[3:]
self.set_blocked(name)
if not name.startswith("pytest_"):
self.set_blocked("pytest_" + name)
else:
self.import_plugin(arg)
def consider_conftest(self, conftestmodule):
if self.register(conftestmodule, name=conftestmodule.__file__):
self.consider_module(conftestmodule)
def consider_env(self):
self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
def consider_module(self, mod):
self._import_plugin_specs(getattr(mod, "pytest_plugins", None))
def _import_plugin_specs(self, spec):
if spec:
if isinstance(spec, str):
spec = spec.split(",")
for import_spec in spec:
self.import_plugin(import_spec)
def import_plugin(self, modname):
# most often modname refers to builtin modules, e.g. "pytester",
# "terminal" or "capture". Those plugins are registered under their
# basename for historic purposes but must be imported with the
# _pytest prefix.
assert isinstance(modname, str)
if self.get_plugin(modname) is not None:
return
if modname in builtin_plugins:
importspec = "_pytest." + modname
else:
importspec = modname
try:
__import__(importspec)
except ImportError as e:
new_exc = ImportError('Error importing plugin "%s": %s' % (modname, e))
# copy over name and path attributes
for attr in ('name', 'path'):
if hasattr(e, attr):
setattr(new_exc, attr, getattr(e, attr))
raise new_exc
except Exception as e:
import pytest
if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
raise
self._warn("skipped plugin %r: %s" %((modname, e.msg)))
else:
mod = sys.modules[importspec]
self.register(mod, modname)
self.consider_module(mod)
class Parser:
""" Parser for command line arguments and ini-file values.
:ivar extra_info: dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = []
self._processopt = processopt
self._usage = usage
self._inidict = {}
self._ininames = []
self.extra_info = {}
def processoption(self, option):
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
:name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.Parser.addoption>` but will be shown in the
        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i+1, group)
return group
def addoption(self, *opts, **attrs):
""" register a command line option.
:opts: option names, can be short or long options.
:attrs: same attributes which the ``add_option()`` function of the
`argparse library
<http://docs.python.org/2/library/argparse.html>`_
accepts.
After command line parsing options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
def parse(self, args, namespace=None):
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
def _getparser(self):
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self, self.extra_info)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
# bash like autocompletion for dirs (appending '/')
optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
return optparser
def parse_setoption(self, args, option, namespace=None):
parsedoption = self.parse(args, namespace=namespace)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return getattr(parsedoption, FILE_OR_DIR)
def parse_known_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments at this
point.
"""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments, and
the remaining arguments unknown at this point.
"""
optparser = self._getparser()
args = [str(x) for x in args]
return optparser.parse_known_args(args, namespace=namespace)
def addini(self, name, help, type=None, default=None):
""" register an ini-file option.
:name: name of the ini-variable
:type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
:default: default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
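# A minimal usage sketch (ours): this mirrors what plugins do from their
# pytest_addoption(parser) hook.
#
#     group = parser.getgroup("myplugin", "my plugin options")
#     group.addoption("--my-flag", action="store_true", dest="myflag",
#                     help="enable my plugin behaviour")
#     parser.addini("my_ini_value", help="an example ini value", default="x")
#
# After parsing, the values are available as config.getoption("myflag") and
# config.getini("my_ini_value").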
class ArgumentError(Exception):
"""
Raised if an Argument instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class Argument:
"""class that mimics the necessary behaviour of optparse.Option """
_typ_map = {
'int': int,
'string': str,
}
# enable after some grace period for plugin writers
TYPE_WARN = False
def __init__(self, *names, **attrs):
"""store parms in private vars for use in add_argument"""
self._attrs = attrs
self._short_opts = []
self._long_opts = []
self.dest = attrs.get('dest')
if self.TYPE_WARN:
try:
help = attrs['help']
if '%default' in help:
warnings.warn(
'pytest now uses argparse. "%default" should be'
' changed to "%(default)s" ',
FutureWarning,
stacklevel=3)
except KeyError:
pass
try:
typ = attrs['type']
except KeyError:
pass
else:
# this might raise a keyerror as well, don't want to catch that
if isinstance(typ, py.builtin._basestring):
if typ == 'choice':
if self.TYPE_WARN:
warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this is optional and when supplied '
' should be a type.'
' (options: %s)' % (typ, names),
FutureWarning,
stacklevel=3)
# argparse expects a type here take it from
# the type of the first element
attrs['type'] = type(attrs['choices'][0])
else:
if self.TYPE_WARN:
warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this should be a type.'
' (options: %s)' % (typ, names),
FutureWarning,
stacklevel=3)
attrs['type'] = Argument._typ_map[typ]
# used in test_parseopt -> test_parse_defaultgetter
self.type = attrs['type']
else:
self.type = typ
try:
# attribute existence is tested in Config._processopt
self.default = attrs['default']
except KeyError:
pass
self._set_opt_strings(names)
if not self.dest:
if self._long_opts:
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError:
raise ArgumentError(
'need a long or short option', self)
def names(self):
return self._short_opts + self._long_opts
def attrs(self):
# update any attributes set by processopt
attrs = 'default dest help'.split()
if self.dest:
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get('help'):
a = self._attrs['help']
a = a.replace('%default', '%(default)s')
#a = a.replace('%prog', '%(prog)s')
self._attrs['help'] = a
return self._attrs
def _set_opt_strings(self, opts):
"""directly from optparse
might not be necessary as this is passed to argparse later on"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def __repr__(self):
retval = 'Argument('
if self._short_opts:
retval += '_short_opts: ' + repr(self._short_opts) + ', '
if self._long_opts:
retval += '_long_opts: ' + repr(self._long_opts) + ', '
retval += 'dest: ' + repr(self.dest) + ', '
if hasattr(self, 'type'):
retval += 'type: ' + repr(self.type) + ', '
if hasattr(self, 'default'):
retval += 'default: ' + repr(self.default) + ', '
if retval[-2:] == ', ': # always long enough to test ("Argument(" )
retval = retval[:-2]
retval += ')'
return retval
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
self.description = description
self.options = []
self.parser = parser
def addoption(self, *optnames, **attrs):
""" add an option to this group.
if a shortened version of a long option is specified it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords
"""
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option, shortupper=False):
if not shortupper:
for opt in option._short_opts:
if opt[0] == '-' and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
class MyOptionParser(argparse.ArgumentParser):
def __init__(self, parser, extra_info=None):
if not extra_info:
extra_info = {}
self._parser = parser
argparse.ArgumentParser.__init__(self, usage=parser._usage,
add_help=False, formatter_class=DropShorterLongHelpFormatter)
# extra_info is a dict of (param -> value) to display if there's
        # a usage error to provide more contextual information to the user
self.extra_info = extra_info
def parse_args(self, args=None, namespace=None):
"""allow splitting of positional arguments"""
args, argv = self.parse_known_args(args, namespace)
if argv:
for arg in argv:
if arg and arg[0] == '-':
lines = ['unrecognized arguments: %s' % (' '.join(argv))]
for k, v in sorted(self.extra_info.items()):
lines.append(' %s: %s' % (k, v))
self.error('\n'.join(lines))
getattr(args, FILE_OR_DIR).extend(argv)
return args
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
"""shorten help for long options that differ only in extra hyphens
- collapse **long** options that are the same except for extra hyphens
    - special action attribute map_long_option allows suppressing additional
long options
- shortcut if there are only two options and one of them is a short one
- cache result on action object as this is called at least 2 times
"""
def _format_action_invocation(self, action):
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != '-': # only optional arguments
return orgstr
res = getattr(action, '_formatted_action_invocation', None)
if res:
return res
options = orgstr.split(', ')
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr
return orgstr
return_list = []
option_map = getattr(action, 'map_long_option', {})
if option_map is None:
option_map = {}
short_long = {}
for option in options:
if len(option) == 2 or option[2] == ' ':
continue
if not option.startswith('--'):
raise ArgumentError('long optional argument without "--": [%s]'
% (option), self)
xxoption = option[2:]
if xxoption.split()[0] not in option_map:
shortened = xxoption.replace('-', '')
if shortened not in short_long or \
len(short_long[shortened]) < len(xxoption):
short_long[shortened] = xxoption
# now short_long has been filled out to the longest with dashes
# **and** we keep the right option ordering from add_argument
        for option in options:
if len(option) == 2 or option[2] == ' ':
return_list.append(option)
if option[2:] == short_long.get(option.replace('-', '')):
return_list.append(option.replace(' ', '='))
action._formatted_action_invocation = ', '.join(return_list)
return action._formatted_action_invocation
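# Illustration (assumed example, not from the original source): for an action
# registered as addoption('--funcargs', '--func-args'), the default formatter
# would render "--funcargs, --func-args"; the formatter above keeps only the
# spelling with the most hyphens, so --help shows "--func-args" (with any
# " VALUE" part rewritten as "=VALUE") while both spellings remain accepted.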
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class CmdOptions(object):
""" holds cmdline options as attributes."""
def __init__(self, values=()):
self.__dict__.update(values)
def __repr__(self):
return "<CmdOptions %r>" %(self.__dict__,)
def copy(self):
return CmdOptions(self.__dict__)
class Notset:
def __repr__(self):
return "<NOTSET>"
notset = Notset()
FILE_OR_DIR = 'file_or_dir'
class Config(object):
""" access to configuration values, pluginmanager and plugin hooks. """
def __init__(self, pluginmanager):
#: access to command line option as attributes.
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
self.option = CmdOptions()
_a = FILE_OR_DIR
self._parser = Parser(
usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
processopt=self._processopt,
)
#: a pluginmanager instance
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
self._inicache = {}
self._opt2dest = {}
self._cleanup = []
self._warn = self.pluginmanager._warn
self.pluginmanager.register(self, "pytestconfig")
self._configured = False
def do_setns(dic):
import pytest
setns(pytest, dic)
self.hook.pytest_namespace.call_historic(do_setns, {})
self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
def add_cleanup(self, func):
""" Add a function to be called when the config object gets out of
        use (usually coinciding with pytest_unconfigure)."""
self._cleanup.append(func)
def _do_configure(self):
assert not self._configured
self._configured = True
self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
def _ensure_unconfigure(self):
if self._configured:
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.hook.pytest_configure._call_history = []
while self._cleanup:
fin = self._cleanup.pop()
fin()
def warn(self, code, message, fslocation=None):
""" generate a warning for this test session. """
self.hook.pytest_logwarning.call_historic(kwargs=dict(
code=code, message=message,
fslocation=fslocation, nodeid=None))
def get_terminal_writer(self):
return self.pluginmanager.get_plugin("terminalreporter")._tw
def pytest_cmdline_parse(self, pluginmanager, args):
# REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
self.parse(args)
return self
def notify_exception(self, excinfo, option=None):
if option and option.fulltrace:
style = "long"
else:
style = "native"
excrepr = excinfo.getrepr(funcargs=True,
showlocals=getattr(option, 'showlocals', False),
style=style,
)
res = self.hook.pytest_internalerror(excrepr=excrepr,
excinfo=excinfo)
if not py.builtin.any(res):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" %line)
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid):
# nodeid's are relative to the rootpath, compute relative to cwd
if self.invocation_dir != self.rootdir:
fullpath = self.rootdir.join(nodeid)
nodeid = self.invocation_dir.bestrelpath(fullpath)
return nodeid
@classmethod
def fromdictargs(cls, option_dict, args):
""" constructor useable for subprocesses. """
config = get_config()
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _processopt(self, opt):
for name in opt._short_opts + opt._long_opts:
self._opt2dest[name] = opt.dest
if hasattr(opt, 'default') and opt.dest:
if not hasattr(self.option, opt.dest):
setattr(self.option, opt.dest, opt.default)
@hookimpl(trylast=True)
def pytest_load_initial_conftests(self, early_config):
self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
def _initini(self, args):
ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args)
self.rootdir, self.inifile, self.inicfg = r
self._parser.extra_info['rootdir'] = self.rootdir
self._parser.extra_info['inifile'] = self.inifile
self.invocation_dir = py.path.local()
self._parser.addini('addopts', 'extra command line options', 'args')
self._parser.addini('minversion', 'minimally required pytest version')
def _preparse(self, args, addopts=True):
self._initini(args)
if addopts:
args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
args[:] = self.getini("addopts") + args
self._checkversion()
self.pluginmanager.consider_preparse(args)
try:
self.pluginmanager.load_setuptools_entrypoints("pytest11")
except ImportError as e:
self.warn("I2", "could not load setuptools entry import: %s" % (e,))
self.pluginmanager.consider_env()
self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
if self.known_args_namespace.confcutdir is None and self.inifile:
confcutdir = py.path.local(self.inifile).dirname
self.known_args_namespace.confcutdir = confcutdir
try:
self.hook.pytest_load_initial_conftests(early_config=self,
args=args, parser=self._parser)
except ConftestImportFailure:
e = sys.exc_info()[1]
if ns.help or ns.version:
# we don't want to prevent --help/--version to work
                # so just let it pass and print a warning at the end
self._warn("could not load initial conftests (%s)\n" % e.path)
else:
raise
def _checkversion(self):
import pytest
minver = self.inicfg.get('minversion', None)
if minver:
ver = minver.split(".")
myver = pytest.__version__.split(".")
if myver < ver:
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'" %(
self.inicfg.config.path, self.inicfg.lineof('minversion'),
minver, pytest.__version__))
def parse(self, args, addopts=True):
# parse given cmdline arguments into this config object.
assert not hasattr(self, 'args'), (
"can only parse cmdline args at most once per Config object")
self._origargs = args
self.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=self.pluginmanager))
self._preparse(args, addopts=addopts)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
args = self._parser.parse_setoption(args, self.option, namespace=self.option)
if not args:
cwd = os.getcwd()
if cwd == self.rootdir:
args = self.getini('testpaths')
if not args:
args = [cwd]
self.args = args
def addinivalue_line(self, name, line):
""" add a line to an ini-file option. The option must have been
        declared but might not yet be set, in which case the line becomes
        the first line in its value. """
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name):
""" return configuration value from an :ref:`ini file <inifiles>`. If the
specified name hasn't been registered through a prior
:py:func:`parser.addini <pytest.config.Parser.addini>`
call (usually from a plugin), a ValueError is raised. """
try:
return self._inicache[name]
except KeyError:
self._inicache[name] = val = self._getini(name)
return val
def _getini(self, name):
try:
description, type, default = self._parser._inidict[name]
except KeyError:
raise ValueError("unknown configuration value: %r" %(name,))
try:
value = self.inicfg[name]
except KeyError:
if default is not None:
return default
if type is None:
return ''
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
l = []
for relpath in shlex.split(value):
l.append(dp.join(relpath, abs=True))
return l
elif type == "args":
return shlex.split(value)
elif type == "linelist":
return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
elif type == "bool":
return bool(_strtobool(value.strip()))
else:
assert type is None
return value
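    # Illustration (assumed ini content, not from the original source): with
    # ``addopts = -ra -q`` in the ini file and a registration of type 'args',
    # _getini('addopts') returns ['-ra', '-q']; a 'linelist' value is split on
    # newlines, 'bool' goes through _strtobool(), and 'pathlist' entries are
    # resolved relative to the ini file's directory.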
def _getconftest_pathlist(self, name, path):
try:
mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
l = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", py.path.local.sep)
relroot = modpath.join(relroot, abs=True)
l.append(relroot)
return l
def getoption(self, name, default=notset, skip=False):
""" return command line option value.
:arg name: name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
:arg default: default value if no option of that name exists.
        :arg skip: if True raise pytest.skip if option does not exist
or has a None value.
"""
name = self._opt2dest.get(name, name)
try:
val = getattr(self.option, name)
if val is None and skip:
raise AttributeError(name)
return val
except AttributeError:
if default is not notset:
return default
if skip:
import pytest
pytest.skip("no %r option found" %(name,))
raise ValueError("no option named %r" % (name,))
def getvalue(self, name, path=None):
""" (deprecated, use getoption()) """
return self.getoption(name)
def getvalueorskip(self, name, path=None):
""" (deprecated, use getoption(skip=True)) """
return self.getoption(name, skip=True)
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, inibasenames):
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if 'pytest' in iniconfig.sections:
return base, p, iniconfig['pytest']
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
return None, None, None
def get_common_ancestor(args):
# args are what we get after early command line parsing (usually
# strings, but can be py.path.local objects as well)
common_ancestor = None
for arg in args:
if str(arg)[0] == "-":
continue
p = py.path.local(arg)
if common_ancestor is None:
common_ancestor = p
else:
if p.relto(common_ancestor) or p == common_ancestor:
continue
elif common_ancestor.relto(p):
common_ancestor = p
else:
shared = p.common(common_ancestor)
if shared is not None:
common_ancestor = shared
if common_ancestor is None:
common_ancestor = py.path.local()
elif not common_ancestor.isdir():
common_ancestor = common_ancestor.dirpath()
return common_ancestor
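# Illustration (assumed paths, not from the original source): for args
# ['tests/unit', 'tests/integration'] the loop above settles on 'tests'; for
# disjoint paths it falls back to the deepest shared parent via p.common(),
# and with no usable args it returns the invocation directory.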
def determine_setup(inifile, args):
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
try:
inicfg = iniconfig["pytest"]
except KeyError:
inicfg = None
rootdir = get_common_ancestor(args)
else:
ancestor = get_common_ancestor(args)
rootdir, inifile, inicfg = getcfg(
[ancestor], ["pytest.ini", "tox.ini", "setup.cfg"])
if rootdir is None:
for rootdir in ancestor.parts(reverse=True):
if rootdir.join("setup.py").exists():
break
else:
rootdir = ancestor
return rootdir, inifile, inicfg or {}
def setns(obj, dic):
import pytest
for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
if mod is None:
modname = "pytest.%s" % name
mod = types.ModuleType(modname)
sys.modules[modname] = mod
mod.__all__ = []
setattr(obj, name, mod)
obj.__all__.append(name)
setns(mod, value)
else:
setattr(obj, name, value)
obj.__all__.append(name)
#if obj != pytest:
# pytest.__all__.append(name)
setattr(pytest, name, value)
def create_terminal_writer(config, *args, **kwargs):
"""Create a TerminalWriter instance configured according to the options
in the config object. Every code which requires a TerminalWriter object
and has access to a config object should use this function.
"""
tw = py.io.TerminalWriter(*args, **kwargs)
if config.option.color == 'yes':
tw.hasmarkup = True
if config.option.color == 'no':
tw.hasmarkup = False
return tw
def _strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
.. note:: copied from distutils.util
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
| mpl-2.0 | 5,937,829,036,005,547,000 | 36.791107 | 106 | 0.565276 | false |
josephnoir/RIOT | cpu/esp32/gen_esp32part.py | 15 | 17009 | #!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See http://esp-idf.readthedocs.io/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
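# Example invocations (illustrative file names; run with --help for the
# authoritative option list):
#
#     ./gen_esp32part.py partitions.csv partitions.bin    # CSV -> binary
#     ./gen_esp32part.py partitions.bin partitions.csv    # binary -> CSV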
from __future__ import print_function, division
import argparse
import os
import re
import struct
import sys
import hashlib
import binascii
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
SHA256_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14 # The first 2 bytes are like magic numbers for SHA256 sum
__version__ = '1.0'
quiet = False
sha256sum = True
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
if not quiet:
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__(self)
@classmethod
def from_csv(cls, csv_contents):
res = PartitionTable()
lines = csv_contents.splitlines()
def expand_vars(f):
f = os.path.expandvars(f)
m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
if m:
raise InputError("unknown variable '%s'" % m.group(1))
return f
line_num = range(len(lines))
for line_no in line_num:
line = expand_vars(lines[line_no]).strip()
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
except Exception:
critical("Unexpected error parsing line %d: %s" % (line_no+1, line))
raise
# fix up missing offsets & negative sizes
last_end = 0x5000 # first offset after partition table
for e in res:
if e.offset is None:
pad_to = 0x10000 if e.type == PartitionDefinition.APP_TYPE else 4
if last_end % pad_to != 0:
last_end += pad_to - (last_end % pad_to)
e.offset = last_end
if e.size < 0:
e.size = -e.size - e.offset
last_end = e.offset + e.size
return res
def __getitem__(self, item):
""" Allow partition table access via name as well as by
numeric index. """
if isinstance(item, str):
for x in self:
if x.name == item:
return x
raise ValueError("No partition entry named '%s'" % item)
else:
return super(PartitionTable, self).__getitem__(item)
def verify(self):
# verify each partition individually
for p in self:
p.verify()
# check for overlaps
last = None
for p in sorted(self, key=lambda x: x.offset):
if p.offset < 0x5000:
raise InputError("Partition offset 0x%x is below 0x5000" % p.offset)
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset,
last.offset,
last.offset+last.size-1))
last = p
def flash_size(self):
""" Return the size that partitions will occupy in flash
(ie the offset the last partition ends at)
"""
try:
last = sorted(self, reverse=True)[0]
except IndexError:
return 0 # empty table!
return last.offset + last.size
@classmethod
def from_binary(cls, b):
sha256 = hashlib.sha256()
result = cls()
for o in range(0, len(b), 32):
data = b[o:o+32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
return result # got end marker
if sha256sum and data[:2] == SHA256_PARTITION_BEGIN[:2]: # check only the magic number part
if data[16:] == sha256.digest():
continue # the next iteration will check for the end marker
else:
raise InputError("SHA256 checksums don't match! "
"(computed: 0x%s, parsed: 0x%s)" % (sha256.hexdigest(),
binascii.hexlify(data[16:])))
else:
sha256.update(data)
result.append(PartitionDefinition.from_binary(data))
raise InputError("Partition table is missing an end-of-table marker")
def to_binary(self):
result = b"".join(e.to_binary() for e in self)
        # to satisfy Codacy, was: if sha256sum:
        # to satisfy Codacy, was:     result += SHA256_PARTITION_BEGIN + hashlib.sha256(result).digest()
if sha256sum:
result += SHA256_PARTITION_BEGIN + hashlib.sha256(result).digest()
if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
def to_csv(self, simple_formatting=False):
rows = ["# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags"]
rows += [x.to_csv(simple_formatting) for x in self]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app": APP_TYPE,
"data": DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE: {
"factory": 0x00,
"test": 0x20,
},
DATA_TYPE: {
"ota": 0x00,
"phy": 0x01,
"nvs": 0x02,
"coredump": 0x03,
"esphttpd": 0x80,
"fat": 0x81,
"spiffs": 0x82,
},
}
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE: 0x10000,
DATA_TYPE: 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted": 0
}
# add subtypes for the 16 OTA slot values ("ota_XXX, etc.")
for ota_slot in range(16):
SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot
def __init__(self):
self.name = ""
self.type = None
self.subtype = None
self.offset = None
self.size = None
self.encrypted = False
@classmethod
def from_csv(cls, line):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [f.strip() for f in line_w_defaults.split(",")]
res = PartitionDefinition()
res.name = fields[0]
res.type = res.parse_type(fields[1])
res.subtype = res.parse_subtype(fields[2])
res.offset = res.parse_address(fields[3])
res.size = res.parse_address(fields[4])
if res.size is None:
raise InputError("Size field can't be empty")
flags = fields[5].split(":")
for flag in flags:
if flag in cls.FLAGS:
setattr(res, flag, True)
elif len(flag) > 0:
raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
return res
def __eq__(self, other):
return self.name == other.name and self.type == other.type \
and self.subtype == other.subtype and self.offset == other.offset \
and self.size == other.size
def __repr__(self):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type,
self.subtype or 0,
maybe_hex(self.offset),
maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type,
self.subtype, self.offset or -1,
self.size or -1)
def __cmp__(self, other):
return self.offset - other.offset
def parse_type(self, strval):
if strval == "":
raise InputError("Field 'type' can't be left empty.")
return parse_int(strval, self.TYPES)
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return parse_int(strval, self.SUBTYPES.get(self.type, {}))
@classmethod
def parse_address(cls, strval):
if strval == "":
return None # PartitionTable will fill in default
return parse_int(strval, {})
def verify(self):
if self.type is None:
raise ValidationError(self, "Type field is not set")
if self.subtype is None:
raise ValidationError(self, "Subtype field is not set")
if self.offset is None:
raise ValidationError(self, "Offset field is not set")
align = self.ALIGNMENT.get(self.type, 4)
if self.offset % align:
raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
if self.size is None:
raise ValidationError(self, "Size field is not set")
STRUCT_FORMAT = "<2sBBLL16sL"
@classmethod
def from_binary(cls, b):
if len(b) != 32:
raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag, bit in cls.FLAGS.items():
if flags & (1 << bit):
setattr(res, flag, True)
flags &= ~(1 << bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [flag for flag in self.FLAGS.keys() if getattr(self, flag)]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
return struct.pack(self.STRUCT_FORMAT,
self.MAGIC_BYTES,
self.type, self.subtype,
self.offset, self.size,
self.name.encode(),
flags)
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [(0x100000, "M"), (0x400, "K")]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k, v in keywords.items():
if simple_formatting is False and t == v:
return k
return "%d" % t
def generate_text_flags():
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([self.name,
lookup_keyword(self.type, self.TYPES),
lookup_keyword(self.subtype, self.SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
def parse_int(v, keywords):
"""Generic parser for integer fields - int(x,0) with provision for
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [("k", 1024), ("m", 1024*1024)]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
except ValueError:
if len(keywords) == 0:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def main():
global quiet
global sha256sum
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
parser.add_argument('--flash-size',
help='Optional flash size limit, checks partition table fits in flash',
nargs='?', choices=['1MB', '2MB', '4MB', '8MB', '16MB'])
parser.add_argument('--disable-sha256sum', help='Disable sha256 checksum for the partition table',
default=False, action='store_true')
parser.add_argument('--verify', '-v', help='Verify partition table fields',
default=True, action='store_false')
parser.add_argument('--quiet', '-q', help="Don't print status messages to stderr",
action='store_true')
parser.add_argument('input',
help='Path to CSV or binary file to parse. Will use stdin if omitted.',
type=argparse.FileType('rb'), default=sys.stdin)
parser.add_argument('output', help='Path to output converted binary or CSV file. Will use '
'stdout if omitted, unless the --display argument is also passed (in '
'which case only the summary is printed.)',
nargs='?',
default='-')
args = parser.parse_args()
quiet = args.quiet
sha256sum = not args.disable_sha256sum
input_arg = args.input.read()
input_is_binary = input_arg[0:2] == PartitionDefinition.MAGIC_BYTES
if input_is_binary:
status("Parsing binary partition input...")
table = PartitionTable.from_binary(input_arg)
else:
input_arg = input_arg.decode()
status("Parsing CSV input...")
table = PartitionTable.from_csv(input_arg)
if args.verify:
status("Verifying table...")
table.verify()
if args.flash_size:
size_mb = int(args.flash_size.replace("MB", ""))
size = size_mb * 1024 * 1024 # flash memory uses honest megabytes!
table_size = table.flash_size()
if size < table_size:
raise InputError("Partitions defined in '%s' occupy %.1fMB of flash (%d bytes) which "
"does not fit in configured flash size %dMB. Change the flash size "
"in menuconfig under the 'Serial Flasher Config' menu." %
(args.input.name, table_size / 1024.0 / 1024.0, table_size, size_mb))
if input_is_binary:
output = table.to_csv()
with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
f.write(output)
else:
output = table.to_binary()
with sys.stdout.buffer if args.output == '-' else open(args.output, 'wb') as f:
f.write(output)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, partition, message):
super(ValidationError, self).__init__(
"Partition %s invalid: %s" % (partition.name, message))
if __name__ == '__main__':
try:
main()
except InputError as e:
print(e, file=sys.stderr)
sys.exit(2)
| lgpl-2.1 | 6,493,035,867,249,300,000 | 36.547461 | 110 | 0.543301 | false |
mfalcon/edujango | edujango/static/admin/js/compress.py | 784 | 1896 | #!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)
def main():
usage = "usage: %prog [file1..fileN]"
description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
parser = optparse.OptionParser(usage, description=description)
parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
help="path to Closure Compiler jar file")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose")
(options, args) = parser.parse_args()
compiler = os.path.expanduser(options.compiler)
if not os.path.exists(compiler):
sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
if not args:
if options.verbose:
sys.stdout.write("No filenames given; defaulting to admin scripts\n")
args = [os.path.join(here, f) for f in [
"actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
for arg in args:
if not arg.endswith(".js"):
arg = arg + ".js"
to_compress = os.path.expanduser(arg)
if os.path.exists(to_compress):
to_compress_min = "%s.min.js" % "".join(arg.rsplit(".js"))
cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min)
if options.verbose:
sys.stdout.write("Running: %s\n" % cmd)
subprocess.call(cmd.split())
else:
sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
if __name__ == '__main__':
main()
| apache-2.0 | 1,626,191,519,628,529,000 | 39.340426 | 123 | 0.602321 | false |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/numpy/distutils/from_template.py | 20 | 7826 | #!/usr/bin/python
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace(r'\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace(r'\>', '@rightarrow@')
substr = substr.replace(r'\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
| apache-2.0 | 4,657,998,032,828,972,000 | 29.570313 | 95 | 0.563762 | false |
Audacity-Team/Audacity | lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/suncxx.py | 134 | 1459 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_sxx(conf):
v=conf.env
cc=None
if v['CXX']:cc=v['CXX']
elif'CXX'in conf.environ:cc=conf.environ['CXX']
if not cc:cc=conf.find_program('CC',var='CXX')
if not cc:cc=conf.find_program('c++',var='CXX')
if not cc:conf.fatal('Could not find a Sun C++ compiler')
cc=conf.cmd_to_list(cc)
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v['CXX']=cc
v['CXX_NAME']='sun'
@conf
def sxx_common_flags(conf):
v=conf.env
v['CXX_SRC_F']=[]
v['CXX_TGT_F']=['-c','-o']
if not v['LINK_CXX']:v['LINK_CXX']=v['CXX']
v['CXXLNK_SRC_F']=[]
v['CXXLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cxxprogram_PATTERN']='%s'
v['CXXFLAGS_cxxshlib']=['-Kpic','-DPIC']
v['LINKFLAGS_cxxshlib']=['-G']
v['cxxshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cxxstlib']=['-Bstatic']
v['cxxstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_sxx()
conf.find_ar()
conf.sxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
| mit | 3,337,698,530,215,367,000 | 26.018519 | 102 | 0.637423 | false |
malena/bedrock | scripts/check_calendars.py | 18 | 1596 | #!/usr/bin/env python
import os
from icalendar import Calendar
def get_ics(filename):
return filename.endswith('ics')
def check_if_correct_parse(ics_file):
fh = open(ics_file, 'rb')
try:
        # some calendars, such as Austrian ones, have multiple
# vCalendar entries - we probably don't want them to fail
# parse. So we set multiple=True below
cal_entries = Calendar.from_ical(fh.read(), multiple=True)
if cal_entries is None:
raise ValueError
finally:
fh.close()
def run(*args):
calendars_dir = os.path.join('media','caldata')
ics_files = map(lambda x: os.path.join(calendars_dir, x),
filter(get_ics, os.listdir(calendars_dir)))
format_str = "Failed to parse the icalendar file: {}. {}"
check_failed = False
for f in ics_files:
try:
check_if_correct_parse(f)
except ValueError as ve:
check_failed = True
print format_str.format(f, ve.message)
if check_failed:
# Returning a positive error code, since we have nothing to do
# with these errors. They simply have to be reported back to
# caldata maintainers. Also, we have to return something
# other than zero - for travis to fail build over invalid files.
# Please see: http://docs.travis-ci.com/user/build-lifecycle/
# """
# When any of the steps in the script stage fails with a non-zero
# exit code, the build will be marked as failed.
# """
exit(1)
# vim: ts=4 sw=4 et ai
| mpl-2.0 | 6,486,856,726,170,703,000 | 29.692308 | 76 | 0.609649 | false |
LLNL/spack | lib/spack/spack/cmd/modules/lmod.py | 5 | 1702 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import functools
import os
import llnl.util.filesystem
import spack.cmd.common.arguments
import spack.cmd.modules
def add_command(parser, command_dict):
lmod_parser = parser.add_parser(
'lmod', help='manipulate hierarchical module files'
)
sp = spack.cmd.modules.setup_parser(lmod_parser)
# Set default module file for a package
setdefault_parser = sp.add_parser(
'setdefault', help='set the default module file for a package'
)
spack.cmd.common.arguments.add_common_arguments(
setdefault_parser, ['constraint']
)
callbacks = dict(spack.cmd.modules.callbacks.items())
callbacks['setdefault'] = setdefault
command_dict['lmod'] = functools.partial(
spack.cmd.modules.modules_cmd, module_type='lmod', callbacks=callbacks
)
def setdefault(module_type, specs, args):
"""Set the default module file, when multiple are present"""
# For details on the underlying mechanism see:
#
# https://lmod.readthedocs.io/en/latest/060_locating.html#marking-a-version-as-default
#
spack.cmd.modules.one_spec_or_raise(specs)
writer = spack.modules.module_types['lmod'](specs[0])
module_folder = os.path.dirname(writer.layout.filename)
module_basename = os.path.basename(writer.layout.filename)
with llnl.util.filesystem.working_dir(module_folder):
if os.path.exists('default') and os.path.islink('default'):
os.remove('default')
os.symlink(module_basename, 'default')
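# Usage sketch (illustrative): ``spack module lmod setdefault <spec>`` invokes
# the callback above, which replaces the ``default`` symlink next to the
# chosen module file so that Lmod resolves that version when a user loads the
# package without specifying one.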
| lgpl-2.1 | 3,430,705,824,732,387,000 | 33.04 | 90 | 0.703878 | false |
ming0627/foursquared.eclair | util/gen_parser.py | 262 | 4392 | #!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna ([email protected])
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
        # pop off the extraneous } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
# CameCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name.lower().capitalize()
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
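# Illustration (assumed arguments): Replacements('venue', 'city_name',
# 'String', [None]) yields camel_name 'CityName', field_name 'mCityName' and
# parser_name 'StringParser'; with no child type the sub-parser name is
# derived by dropping the final character (an assumed plural 's'), giving
# 'CityNamParser' here.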
if __name__ == '__main__':
main()
| apache-2.0 | 1,405,922,200,652,339,000 | 28.47651 | 116 | 0.644581 | false |
hknyldz/pisitools | pisilinux/pisilinux/cli/listnewest.py | 1 | 2965 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2009, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import optparse
import gettext
__trans = gettext.translation('pisilinux', fallback=True)
_ = __trans.ugettext
import pisilinux.cli.command as command
import pisilinux.context as ctx
import pisilinux.api
import pisilinux.db
class ListNewest(command.Command, metaclass=command.autocommand):
__doc__ = _("""List newest packages in the repositories
Usage: list-newest [ <repo1> <repo2> ... repon ]
Gives a list of newly published pisilinux packages in the specified
repositories. If no repository is specified, new packages from all
repositories are listed.
""")
def __init__(self, args):
super(ListNewest, self).__init__(args)
self.componentdb = pisilinux.db.componentdb.ComponentDB()
self.packagedb = pisilinux.db.packagedb.PackageDB()
name = ("list-newest", "ln")
def options(self):
group = optparse.OptionGroup(self.parser, _("list-newest options"))
group.add_option("-s", "--since", action="store",
default=None, help=_("List new packages added to repository after this given date formatted as yyyy-mm-dd"))
group.add_option("-l", "--last", action="store",
default=None, help=_("List new packages added to repository after last nth previous repository update"))
self.parser.add_option_group(group)
def run(self):
self.init(database = True, write = False)
if self.args:
for arg in self.args:
self.print_packages(arg)
else:
# print for all repos
for repo in pisilinux.api.list_repos():
self.print_packages(repo)
def print_packages(self, repo):
if ctx.config.get_option('since'):
since = ctx.config.get_option('since')
elif ctx.config.get_option('last'):
since = pisilinux.db.historydb.HistoryDB().get_last_repo_update(int(ctx.config.get_option('last')))
else:
since = None
l = pisilinux.api.list_newest(repo, since)
if not l:
return
if since:
ctx.ui.info(_("Packages added to %s since %s:\n") % (repo, since))
else:
ctx.ui.info(_("Packages added to %s:") % (repo))
# maxlen is defined dynamically from the longest package name (#9021)
maxlen = max([len(_p) for _p in l])
l.sort()
for p in l:
package = self.packagedb.get_package(p, repo)
lenp = len(p)
p = p + ' ' * max(0, maxlen - lenp)
ctx.ui.info('%s - %s ' % (p, str(package.summary)))
print()
| gpl-3.0 | 708,454,930,612,070,500 | 31.944444 | 139 | 0.611467 | false |
dictoon/blenderseed | logger.py | 2 | 1883 | #
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2018 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import bpy
__logger = None
__mapping = {'debug': logging.DEBUG,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def get_logger():
global __logger
if not __logger:
__logger = logging.getLogger(__name__)
__logger.addHandler(logging.StreamHandler())
log_level = bpy.context.preferences.addons['blenderseed'].preferences.log_level
__logger.setLevel(__mapping[log_level])
return __logger
def set_logger_level(level):
__logger.setLevel(__mapping[level])
| mit | 2,159,758,539,076,128,300 | 32.625 | 87 | 0.7265 | false |
RasPlex/plex-home-theatre | plex/scripts/merge_translations.py | 5 | 2008 | #!/usr/bin/env python
import sys, os, shutil
lang_map = {
"af-ZA": "Afrikaans",
"cs-CZ": "Czech",
"da": "Danish",
"de": "German",
"en": "English (US)",
"es": "Spanish",
"es-419" : "Spanish (Argentina)",
"fi": "Finnish",
"fr": "French",
"grk": "Greek",
"he": "Hebrew",
"hr-HR": "Croatian",
"is-IS": "Icelandic",
"it": "Italian",
"ko": "Korean",
"lt": "Latvian",
"nl": "Dutch",
"no": "Norwegian",
"pl-PL": "Polish",
"pt-BR": "Portuguese (Brazil)",
"pt-PT": "Portuguese",
"ru": "Russian",
"sr": "Serbian",
"sv": "Swedish",
"zh-CN": "Chinese (Simple)"
}
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Need two arguments"
sys.exit(1)
d = sys.argv[1]
dest = sys.argv[2]
if not os.path.isdir(d):
print "%s is not a dir!" % d
sys.exit(1)
if not os.path.isdir(dest):
print "%s is not a xbmc lang dir" % dest
sys.exit(1)
langdir = os.path.join(dest, "language")
skinlangdir = os.path.join(dest, "addons", "skin.plex", "language")
if not os.path.isdir(langdir) or not os.path.isdir(skinlangdir):
print "Can't find %s and %s" % (langdir, skinlangdir)
sys.exit(1)
for l in os.listdir(d):
if not l in lang_map:
print "Can't find mapping for %s" % l
continue
xlang = lang_map[l]
xlang += "_plex"
xlangfile = os.path.join(langdir, xlang, "strings.po")
xskinlangfile = os.path.join(skinlangdir, xlang, "strings.po")
ld = os.path.join(d, l)
pofile = os.path.join(ld, "strings_%s.po" % l)
spofile = os.path.join(ld, "string_skin_%s.po" % l)
if os.path.exists(pofile):
if not os.path.isdir(os.path.join(langdir, xlang)):
print "Can't find dir %s" % os.path.join(langdir, xlang)
else:
print "%s->%s" % (pofile, xlangfile)
shutil.copyfile(pofile, xlangfile)
if os.path.exists(spofile):
      if not os.path.isdir(os.path.join(skinlangdir, xlang)):
print "Can't find dir %s" % os.path.join(skinlangdir, xlang)
else:
print "%s->%s" % (spofile, xskinlangfile)
shutil.copyfile(spofile, xskinlangfile)
| gpl-2.0 | 1,667,632,191,936,396,800 | 22.623529 | 68 | 0.607072 | false |
jamestwebber/scipy | scipy/integrate/_ode.py | 2 | 48014 | # Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
from __future__ import division, print_function, absolute_import
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz.
# To wrap cvode to Python, one must write the extension module by
#   hand. Its interface uses so much 'advanced C' that wrapping it with
#   f2py would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
# if <calculation was unsuccessful>:
# self.success = 0
#        return y1, t1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
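# Concrete sketch of the recipe above (illustrative only): a fixed-step
# explicit Euler integrator. IntegratorBase is defined later in this module;
# a truthy `runner` satisfies the registration guard since run() here needs
# no external runner.
#
#     class euler(IntegratorBase):
#         runner = True
#
#         def __init__(self, dt=1e-3):
#             self.dt = dt
#             self.success = 1
#
#         def run(self, f, jac, y0, t0, t1, f_params, jac_params):
#             y, t = y0, t0
#             while t < t1:
#                 h = min(self.dt, t1 - t)
#                 y = y + h * f(t, y, *f_params)
#                 t = t + h
#             return y, t1
#
#     IntegratorBase.integrator_classes.append(euler)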
__all__ = ['ode', 'complex_ode']
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, int32, isscalar, real, imag, vstack
from . import vode as _vode
from . import _dop
from . import lsoda as _lsoda
# ------------------------------------------------------------------------------
# User interface
# ------------------------------------------------------------------------------
class ode(object):
"""
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
*Note*: The first two arguments of ``f(t, y, ...)`` are in the
opposite order of the arguments in the system definition function used
by `scipy.integrate.odeint`.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Right-hand side of the differential equation. t is a scalar,
``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
`f` should return a scalar, array or list (not a tuple).
jac : callable ``jac(t, y, *jac_args)``, optional
Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
dimension of the matrix must be (lband+uband+1, len(y)).
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
This option is only considered when the user has not supplied a
Jacobian function and has not indicated (by setting either band)
that the Jacobian is banded. In this case, `with_jacobian` specifies
whether the iteration method of the ODE solver's correction step is
chord iteration with an internally generated full Jacobian or
functional iteration with no Jacobian.
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"lsoda"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
automatic method switching between implicit Adams method (for non-stiff
problems) and a method based on backward differentiation formulas (BDF)
(for stiff problems).
Source: http://www.netlib.org/odepack
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "lsoda" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j].
- with_jacobian : bool
*Not used.*
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- max_order_ns : int
Maximum order used in the nonstiff case (default 12).
- max_order_s : int
Maximum order used in the stiff case (default 5).
- max_hnil : int
Maximum number of messages reporting too small step size (t + h = t)
(default 0)
- ixpr : int
Whether to generate extra printing at method switches (default False).
"dopri5"
This is an explicit Runge-Kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: [email protected], [email protected]
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
- verbosity : int
Switch for printing messages (< 0 for no messages).
"dop853"
This is an explicit Runge-Kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
>>> def f(t, y, arg1):
... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
>>> def jac(t, y, arg1):
... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf')
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
... print(r.t+dt, r.integrate(r.t+dt))
1 [-0.71038232+0.23749653j 0.40000271+0.j ]
2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
6.0 [0.58643071+0.339819j 0.08000018+0.j ]
7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
self.stiff = 0
self.f = f
self.jac = jac
self.f_params = ()
self.jac_params = ()
self._y = []
@property
def y(self):
return self._y
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
if isscalar(y):
y = [y]
n_prev = len(self._y)
if not n_prev:
self.set_integrator('') # find first available integrator
self._y = asarray(y, self._integrator.scalar)
self.t = t
self._integrator.reset(len(self._y), self.jac is not None)
return self
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator.
integrator_params
Additional parameters for the integrator.
"""
integrator = find_integrator(name)
if integrator is None:
            # FIXME: this really should raise an exception. Will that break
            # any code?
            warnings.warn('No integrator name matches %r, or the integrator '
                          'is not available.' % name)
else:
self._integrator = integrator(**integrator_params)
if not len(self._y):
self.t = 0.0
self._y = array([0.0], self._integrator.scalar)
self._integrator.reset(len(self._y), self.jac is not None)
return self
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
if step and self._integrator.supports_step:
mth = self._integrator.step
elif relax and self._integrator.supports_run_relax:
mth = self._integrator.run_relax
else:
mth = self._integrator.run
try:
self._y, self.t = mth(self.f, self.jac or (lambda: None),
self._y, self.t, t,
self.f_params, self.jac_params)
except SystemError:
# f2py issue with tuple returns, see ticket 1187.
raise ValueError('Function to integrate must not return a tuple.')
return self._y
def successful(self):
"""Check if integration was successful."""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.success == 1
def get_return_code(self):
"""Extracts the return code for the integration to enable better control
if the integration fails.
In general, a return code > 0 implies success, while a return code < 0
implies failure.
Notes
-----
This section describes possible return codes and their meaning, for available
integrators that can be selected by `set_integrator` method.
"vode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"zvode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"dopri5"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"dop853"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"lsoda"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call (perhaps wrong Dfun type).
-2 Excess accuracy requested (tolerances too small).
-3 Illegal input detected (internal error).
-4 Repeated error test failures (internal error).
-5 Repeated convergence failures (perhaps bad Jacobian or tolerances).
-6 Error weight became zero during problem.
-7 Internal workspace insufficient to finish (internal error).
=========== =======
"""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.istate
def set_f_params(self, *args):
"""Set extra parameters for user-supplied function f."""
self.f_params = args
return self
def set_jac_params(self, *args):
"""Set extra parameters for user-supplied function jac."""
self.jac_params = args
return self
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
            ``solout(t, y)`` is called at each internal integrator step;
            t is a scalar providing the current independent position,
            y is the current solution with ``y.shape == (n,)``.
            solout should return -1 to stop the integration;
            otherwise it should return None or 0.
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout)
if self._y is not None:
self._integrator.reset(len(self._y), self.jac is not None)
else:
raise ValueError("selected integrator does not support solout,"
" choose another one")
def _transform_banded_jac(bjac):
"""
    Convert a real matrix of the form (for example)

        [0 0 A B]        [0 0 0 B]
        [0 0 C D]        [0 0 A D]
        [E F G H]   to   [0 F C H]
        [I J K L]        [E J G L]
                         [I 0 K 0]

    That is, every other column is shifted up one.
"""
# Shift every other column.
newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
newjac[1:, ::2] = bjac[:, ::2]
newjac[:-1, 1::2] = bjac[:, 1::2]
return newjac
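# Minimal sketch (not part of the original module; the values are made up)
# of the column shift performed by _transform_banded_jac: even columns move
# down one row, odd columns keep their row, and a zero row appears on top.
#
#     >>> import numpy as np
#     >>> _transform_banded_jac(np.array([[1., 2., 3., 4.],
#     ...                                 [5., 6., 7., 8.]]))
#     array([[ 0.,  2.,  0.,  4.],
#            [ 1.,  6.,  3.,  8.],
#            [ 5.,  0.,  7.,  0.]])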
class complex_ode(ode):
"""
A wrapper of ode for complex systems.
    This functions similarly to `ode`, but re-maps a complex-valued
equation system to a real-valued one before using the integrators.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
jac : callable ``jac(t, y, *jac_args)``
Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Examples
--------
For usage examples, see `ode`.
"""
def __init__(self, f, jac=None):
self.cf = f
self.cjac = jac
if jac is None:
ode.__init__(self, self._wrap, None)
else:
ode.__init__(self, self._wrap, self._wrap_jac)
def _wrap(self, t, y, *f_args):
f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
# self.tmp is a real-valued array containing the interleaved
# real and imaginary parts of f.
self.tmp[::2] = real(f)
self.tmp[1::2] = imag(f)
return self.tmp
def _wrap_jac(self, t, y, *jac_args):
# jac is the complex Jacobian computed by the user-defined function.
jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
# jac_tmp is the real version of the complex Jacobian. Each complex
# entry in jac, say 2+3j, becomes a 2x2 block of the form
# [2 -3]
# [3 2]
jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
jac_tmp[1::2, ::2] = imag(jac)
jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
ml = getattr(self._integrator, 'ml', None)
mu = getattr(self._integrator, 'mu', None)
if ml is not None or mu is not None:
# Jacobian is banded. The user's Jacobian function has computed
# the complex Jacobian in packed format. The corresponding
# real-valued version has every other column shifted up.
jac_tmp = _transform_banded_jac(jac_tmp)
return jac_tmp
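    # Worked illustration (hypothetical value): a 1x1 complex Jacobian
    # [[2+3j]] expands above into the real-valued 2x2 block
    #     [[2., -3.],
    #      [3.,  2.]]
    # so the real system sees d(Re f)/d(Re y), d(Re f)/d(Im y), etc.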
@property
def y(self):
return self._y[::2] + 1j * self._y[1::2]
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator
integrator_params
Additional parameters for the integrator.
"""
if name == 'zvode':
raise ValueError("zvode must be used with ode, not complex_ode")
lband = integrator_params.get('lband')
uband = integrator_params.get('uband')
if lband is not None or uband is not None:
# The Jacobian is banded. Override the user-supplied bandwidths
# (which are for the complex Jacobian) with the bandwidths of
# the corresponding real-valued Jacobian wrapper of the complex
# Jacobian.
integrator_params['lband'] = 2 * (lband or 0) + 1
integrator_params['uband'] = 2 * (uband or 0) + 1
return ode.set_integrator(self, name, **integrator_params)
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
y = asarray(y)
self.tmp = zeros(y.size * 2, 'float')
self.tmp[::2] = real(y)
self.tmp[1::2] = imag(y)
return ode.set_initial_value(self, self.tmp, t)
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
y = ode.integrate(self, t, step, relax)
return y[::2] + 1j * y[1::2]
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
            ``solout(t, y)`` is called at each internal integrator step;
            t is a scalar providing the current independent position,
            y is the current solution with ``y.shape == (n,)``.
            solout should return -1 to stop the integration;
            otherwise it should return None or 0.
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout, complex=True)
else:
            raise TypeError("selected integrator does not support solout,"
                            " choose another one")
# ------------------------------------------------------------------------------
# ODE integrators
# ------------------------------------------------------------------------------
def find_integrator(name):
for cl in IntegratorBase.integrator_classes:
if re.match(name, cl.__name__, re.I):
return cl
return None
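# Matching above is a case-insensitive regex match of `name` against the
# start of each registered class name, so (illustrative) 'vode', 'VODE' and
# the prefix 'vo' all select the vode class, while the empty string ''
# selects whichever integrator was registered first.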
class IntegratorConcurrencyError(RuntimeError):
"""
Failure due to concurrent usage of an integrator that can be used
only for a single problem at a time.
"""
def __init__(self, name):
msg = ("Integrator `%s` can be used to solve only a single problem "
"at a time. If you want to integrate multiple problems, "
"consider using a different integrator "
"(see `ode.set_integrator`)") % name
RuntimeError.__init__(self, msg)
class IntegratorBase(object):
runner = None # runner is None => integrator is not available
success = None # success==1 if integrator was called successfully
istate = None # istate > 0 means success, istate < 0 means failure
supports_run_relax = None
supports_step = None
supports_solout = False
integrator_classes = []
scalar = float
def acquire_new_handle(self):
# Some of the integrators have internal state (ancient
# Fortran...), and so only one instance can use them at a time.
# We keep track of this, and fail when concurrent usage is tried.
self.__class__.active_global_handle += 1
self.handle = self.__class__.active_global_handle
def check_handle(self):
if self.handle is not self.__class__.active_global_handle:
raise IntegratorConcurrencyError(self.__class__.__name__)
def reset(self, n, has_jac):
"""Prepare integrator for call: allocate memory, set flags, etc.
n - number of equations.
has_jac - if user has supplied function for evaluating Jacobian.
"""
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
        Return a 2-tuple (y1, t1) where y1 is the result and t1 is the
        time at which the integration actually stopped.
"""
        raise NotImplementedError('all integrators must define '
                                  'run(f, jac, y0, t0, t1, f_params, jac_params)')
def step(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Make one integration step and return (y1,t1)."""
raise NotImplementedError('%s does not support step() method' %
self.__class__.__name__)
def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t>=t1 and return (y1,t)."""
raise NotImplementedError('%s does not support run_relax() method' %
self.__class__.__name__)
# XXX: __str__ method for getting visual state of the integrator
def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
"""
Wrap a banded Jacobian function with a function that pads
the Jacobian with `ml` rows of zeros.
"""
def jac_wrapper(t, y):
jac = asarray(jacfunc(t, y, *jac_params))
padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
return padded_jac
return jac_wrapper
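# Hedged sketch (made-up shapes): for ml = 1 the wrapper pads a 2x3 banded
# Jacobian with one extra row of zeros, giving the storage layout the
# f2py-generated dvode wrapper expects:
#
#     >>> import numpy as np
#     >>> wrapped = _vode_banded_jac_wrapper(lambda t, y: np.ones((2, 3)), 1, ())
#     >>> wrapped(0.0, np.zeros(3)).shape
#     (3, 3)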
class vode(IntegratorBase):
runner = getattr(_vode, 'dvode', None)
messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
-2: 'Excess accuracy requested. (Tolerances too small.)',
-3: 'Illegal input detected. (See printed message.)',
-4: 'Repeated error test failures. (Check all input.)',
-5: 'Repeated convergence failures. (Perhaps bad'
' Jacobian supplied or wrong choice of MF or tolerances.)',
-6: 'Error weight became zero during problem. (Solution'
' component i vanished, and ATOL or ATOL(i) = 0.)'
}
supports_run_relax = 1
supports_step = 1
active_global_handle = 0
def __init__(self,
method='adams',
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
order=12,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
):
        if re.match(r'adams', method, re.I):
            self.meth = 1
        elif re.match(r'bdf', method, re.I):
            self.meth = 2
else:
raise ValueError('Unknown integration method %s' % method)
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.order = order
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.success = 1
self.initialized = False
def _determine_mf_and_set_bands(self, has_jac):
"""
Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
In the Fortran code, the legal values of `MF` are:
10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
-11, -12, -14, -15, -21, -22, -24, -25
but this Python wrapper does not use negative values.
Returns
mf = 10*self.meth + miter
self.meth is the linear multistep method:
self.meth == 1: method="adams"
self.meth == 2: method="bdf"
miter is the correction iteration method:
        miter == 0: Functional iteration; no Jacobian involved.
miter == 1: Chord iteration with user-supplied full Jacobian.
miter == 2: Chord iteration with internally computed full Jacobian.
miter == 3: Chord iteration with internally computed diagonal Jacobian.
miter == 4: Chord iteration with user-supplied banded Jacobian.
miter == 5: Chord iteration with internally computed banded Jacobian.
Side effects: If either self.mu or self.ml is not None and the other is None,
then the one that is None is set to 0.
"""
jac_is_banded = self.mu is not None or self.ml is not None
if jac_is_banded:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
# has_jac is True if the user provided a Jacobian function.
if has_jac:
if jac_is_banded:
miter = 4
else:
miter = 1
else:
if jac_is_banded:
if self.ml == self.mu == 0:
miter = 3 # Chord iteration with internal diagonal Jacobian.
else:
miter = 5 # Chord iteration with internal banded Jacobian.
else:
# self.with_jacobian is set by the user in the call to ode.set_integrator.
if self.with_jacobian:
miter = 2 # Chord iteration with internal full Jacobian.
else:
                miter = 0 # Functional iteration; no Jacobian involved.
mf = 10 * self.meth + miter
return mf
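    # Worked examples (illustrative): the defaults (method='adams', no
    # Jacobian, with_jacobian=False) give mf = 10*1 + 0 = 10; method='bdf'
    # with a user-supplied banded Jacobian gives mf = 10*2 + 4 = 24.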
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf == 10:
lrw = 20 + 16 * n
elif mf in [11, 12]:
lrw = 22 + 16 * n + 2 * n * n
elif mf == 13:
lrw = 22 + 17 * n
elif mf in [14, 15]:
lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
elif mf == 20:
lrw = 20 + 9 * n
elif mf in [21, 22]:
lrw = 22 + 9 * n + 2 * n * n
elif mf == 23:
lrw = 22 + 10 * n
elif mf in [24, 25]:
lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
else:
raise ValueError('Unexpected mf=%s' % mf)
if mf % 10 in [0, 3]:
liw = 30
else:
liw = 30 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
if self.ml is not None and self.ml > 0:
# Banded Jacobian. Wrap the user-provided function with one
# that pads the Jacobian array with the extra `self.ml` rows
# required by the f2py-generated wrapper.
jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
(f_params, jac_params))
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
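    # itask values swapped into call_args[2] above follow the dvode
    # convention (assumed from the ODEPACK documentation): 1 = integrate
    # normally to t1, 2 = take one step and return (step()), 3 = stop at the
    # first internal mesh point at or past t1 (run_relax()).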
if vode.runner is not None:
IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
runner = getattr(_vode, 'zvode', None)
supports_run_relax = 1
supports_step = 1
scalar = complex
active_global_handle = 0
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf in (10,):
lzw = 15 * n
elif mf in (11, 12):
lzw = 15 * n + 2 * n ** 2
elif mf in (-11, -12):
lzw = 15 * n + n ** 2
elif mf in (13,):
lzw = 16 * n
elif mf in (14, 15):
lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-14, -15):
lzw = 16 * n + (2 * self.ml + self.mu) * n
elif mf in (20,):
lzw = 8 * n
elif mf in (21, 22):
lzw = 8 * n + 2 * n ** 2
elif mf in (-21, -22):
lzw = 8 * n + n ** 2
elif mf in (23,):
lzw = 9 * n
elif mf in (24, 25):
lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-24, -25):
lzw = 9 * n + (2 * self.ml + self.mu) * n
lrw = 20 + n
if mf % 10 in (0, 3):
liw = 30
else:
liw = 30 + n
zwork = zeros((lzw,), complex)
self.zwork = zwork
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.zwork, self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
if zvode.runner is not None:
IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
supports_solout = True
messages = {1: 'computation successful',
2: 'computation successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nsteps is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.verbosity = verbosity
self.success = 1
self.set_solout(None)
def set_solout(self, solout, complex=False):
self.solout = solout
self.solout_cmplx = complex
if solout is None:
self.iout = 0
else:
self.iout = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
return y, x
def _solout(self, nr, xold, x, y, nd, icomp, con):
if self.solout is not None:
if self.solout_cmplx:
y = y[::2] + 1j * y[1::2]
return self.solout(x, y)
else:
return 1
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
runner = getattr(_dop, 'dop853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
        # Name the base explicitly: super(self.__class__, ...) would recurse
        # infinitely if dop853 were ever subclassed.
        super(dop853, self).__init__(rtol, atol, nsteps, max_step,
                                     first_step, safety, ifactor,
                                     dfactor, beta, method,
                                     verbosity)
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
runner = getattr(_lsoda, 'lsoda', None)
active_global_handle = 0
messages = {
2: "Integration successful.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def __init__(self,
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
ixpr=0,
max_hnil=0,
max_order_ns=12,
max_order_s=5,
method=None
):
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.max_order_ns = max_order_ns
self.max_order_s = max_order_s
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.ixpr = ixpr
self.max_hnil = max_hnil
self.success = 1
self.initialized = False
def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine lsoda.
if has_jac:
if self.mu is None and self.ml is None:
jt = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 4
else:
if self.mu is None and self.ml is None:
jt = 2
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 5
lrn = 20 + (self.max_order_ns + 4) * n
if jt in [1, 2]:
lrs = 22 + (self.max_order_s + 4) * n + n * n
elif jt in [4, 5]:
lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
else:
raise ValueError('Unexpected jt=%s' % jt)
lrw = max(lrn, lrs)
liw = 20 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.ixpr
iwork[5] = self.nsteps
iwork[6] = self.max_hnil
iwork[7] = self.max_order_ns
iwork[8] = self.max_order_s
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, jt]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
args = [f, y0, t0, t1] + self.call_args[:-1] + \
[jac, self.call_args[-1], f_params, 0, jac_params]
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if lsoda.runner is not None:
IntegratorBase.integrator_classes.append(lsoda)
| bsd-3-clause | -8,872,592,165,447,722,000 | 34.021152 | 90 | 0.540655 | false |
hubig/CSCI121-Final-Project | poetry_gen.py | 1 | 7225 | #the_poetry_generator 2017
import random #needed for random selection of words
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import os
def main():
    """Generate a five-line poem and write it to Poem_Generator.txt."""
poem = open("Poem_Generator.txt","w") #Opens up new file "Poem_Generator.txt"
sentence = []
for i in range(5): #Create 5 sentences
sentence.append(create_sentence())
poem.write(sentence[i])
poem.write("\n")
poem.close()
def create_sentence():
#Articles
articles1 = ["the","an"]
articles2 = ['the','a']
articles3 = ['The','An']
articles4 = ['The',"A"]
#Subject
animal = open("Animals.txt", "r") #Opens up the animals string
animal_list =animal.readline().split(",") #Splits the string into a list
subject = animal_list[random.randrange(0,len(animal_list))] #Subject is a random word
#Verb
verb = open("Verbs.txt","r") #Opens verbs
verb_list = verb.readline().split(",")
verbs = verb_list[random.randrange(0,len(verb_list))] #verbs is random verb
#Object
    object_file = None
    if random.randrange(1, 3) == 1: #coin flip: randrange(1,3) is 1 or 2, so the else branch can actually run
        object_file = open("Objects.txt","r") #we choose an Objects.txt entry as an object
        object_list = object_file.readline().split(",")
        objects = object_list[random.randrange(0,len(object_list))] #random object
    else:
        objects = animal_list[random.randrange(0,len(animal_list))] #object is an animal entry
#chooses a random adjective
adj = open("Adj.txt","r")
adj_list = adj.readline().split(",")
adjs = adj_list[random.randrange(0,len(adj_list))]
    if adjs[0] in "aeiouAEIOU":
        Article = articles3[random.randrange(0,len(articles3))] #adjective starts with a vowel: 'The' or 'An'
    else:
        Article = articles4[random.randrange(0,len(articles4))] #otherwise: 'The' or 'A'
# Noun Phrase + Object Phrase
nounphrase = noun_phrase(subject,adjs) #nounphrase is a concatenation of the article, the adjective, and the subject
    if objects[0] in "aeiouAEIOU":
        articles = articles1[random.randrange(0,len(articles1))] #object starts with a vowel: 'the' or 'an'
    else:
        articles = articles2[random.randrange(0,len(articles2))] #otherwise: 'the' or 'a'
objectphrase = obj_phrase(objects)
#adverbs
adv = open("Adverbs.txt")
adv_list = adv.readline().split(",")
advs = adv_list[random.randrange(0,len(adv_list))]
#Creates the verb phrase and decides the present ending of the verb depending on the object of the sentence
    if verbs[-1] == 's' or verbs[-1] == 'h':
        verbs = verbs + "es"
    else:
        verbs = verbs + 's'
verbphrase = verb_phrase(verbs,advs)
#close all the open files
animal.close()
verb.close()
    if object_file is not None: #Objects.txt is only opened on one branch above
        object_file.close()
adj.close()
adv.close()
return Article+" "+repr(nounphrase) + repr(verbphrase) + " " + articles + " "+ repr(objectphrase) #return the sentence
class noun_phrase:
def __init__(noun,word,adj):
noun.x = word
noun.y = adj
def getNoun(noun):
"""Gets the noun"""
return noun.x
def getAdj(noun):
"""Gets the adjective"""
return noun.y
def __repr__(noun):
return str(noun.y)+" "+str(noun.x)+" "
class verb_phrase:
def __init__(verb,word,adv):
verb.x = word
verb.y = adv
def getVerb(verb):
return verb.x
def getAdv(verb):
return verb.y
def __repr__(verb):
return str(verb.y) + " " + str(verb.x)
class obj_phrase:
def __init__(obj,word):
obj.x = word
def getWord(obj):
return obj.x
def __repr__(obj):
return str(obj.x) + "."
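# Illustrative output (all word choices are random; these values are made
# up): with adjective "quick", subject "fox", adverb "boldly", verb "run"
# and object "apple", create_sentence() returns
#     "The quick fox boldly runs an apple."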
class user_gui:
def __init__(self):
self.create_window() #creates window with title
self.create_widgets() #creates widgets
def open_file(self):
"""opens and returns poem text"""
        with open("Poem_Generator.txt", "r") as f:
            poems = f.read()
        return poems
def create_window(self):
"""creates the window."""
self.root= tk.Tk() #creating window
self.root.title("Poem Generator")
def create_widgets(self):
"""creates all the widgets and their frames."""
s = ttk.Style() #using ttk style
s.configure('.', font=('Helvetica', 12), sticky=tk.N+tk.E+tk.S+tk.W)
"""ABOUT"""
about_frame = ttk.Frame(self.root, width = 240, height = 300)
about_frame.grid(row = 1, column = 1, sticky=tk.N+tk.E, ipadx = 10, ipady = 10)
about_frame.columnconfigure(0, weight = 1)
about_frame.rowconfigure(0, weight = 1)
about_text = """ABOUT
This is a random poem generator created by Charlie Carlson, Iain Irwin, and Nic Hubig for the CSCI121 final project."""
about_label = ttk.Label(about_frame, wraplength = 240, text = about_text)
about_label.grid(row = 0, column = 0, sticky=tk.N+tk.E, ipadx = 10, ipady = 10)
about_label.columnconfigure(0, weight = 1)
about_label.rowconfigure(0, weight = 1)
"""POETRY"""
poetry_frame = ttk.Frame(self.root, width = 240, height = 300)
poetry_frame.grid(row = 1, column = 2)
poetry_text = self.open_file()
        self.poetry_label = ttk.Label(poetry_frame, wraplength = 240, text = poetry_text)
        self.poetry_label.grid(row = 0, column = 0, sticky=tk.N+tk.E, ipadx = 10, ipady = 10)
        self.poetry_label.columnconfigure(0, weight = 1)
        self.poetry_label.rowconfigure(0, weight = 1)
"""GENERATE BUTTON"""
        generate = ttk.Button(self.root, text="Generate poetry")
        generate.grid(row=3, column= 1)
        generate.columnconfigure(0, weight = 1)
        generate.rowconfigure(0, weight = 1)
        generate['command'] = self.generate_poem
"""QUIT BUTTON"""
quit_button = ttk.Button(self.root, text="Quit")
quit_button.grid(row=3, column=2)
quit_button['command'] = self.root.destroy
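    def generate_poem(self):
        """Regenerate the poem and refresh the displayed text.

        This completes the originally unwired Generate button; the assumed
        behaviour (rewrite Poem_Generator.txt, then reload it) is inferred
        from context.
        """
        main()
        self.poetry_label['text'] = self.open_file()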
main() #generate an initial poem so Poem_Generator.txt exists before the GUI reads it
program = user_gui()
program.root.mainloop()
| apache-2.0 | 365,445,961,883,083,700 | 37.917127 | 127 | 0.510588 | false |
zstackio/zstack-woodpecker | integrationtest/vm/mini/multiclusters/paths/multi_path270.py | 1 | 2778 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.destroy_vm, 'vm1'],
[TestAction.recover_vm, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster1'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup1'],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster1'],
[TestAction.stop_vm, 'vm3'],
[TestAction.start_vm, 'vm3'],
[TestAction.start_vm, 'vm1'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.delete_volume, 'volume2'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume3'],
[TestAction.create_volume_backup, 'volume3', 'volume3-backup2'],
[TestAction.delete_volume_backup, 'volume3-backup2'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume4'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup3'],
[TestAction.stop_vm, 'vm1'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_image_from_volume, 'vm3', 'vm3-image2'],
[TestAction.detach_volume, 'volume4'],
[TestAction.create_volume, 'volume5', 'cluster=cluster2', 'flag=thin,scsi'],
[TestAction.use_volume_backup, 'volume4-backup3'],
[TestAction.start_vm, 'vm2'],
[TestAction.delete_volume, 'volume3'],
[TestAction.expunge_volume, 'volume3'],
[TestAction.destroy_vm, 'vm3'],
[TestAction.attach_volume, 'vm1', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup4'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_vm_backup, 'vm2-backup1'],
[TestAction.start_vm, 'vm2'],
])
'''
The final status:
Running:['vm1', 'vm2']
Stopped:[]
Enabled:['vm2-backup1', 'volume4-backup3', 'volume4-backup4', 'vm3-image2']
attached:['volume1', 'volume4']
Detached:['volume5']
Deleted:['vm3', 'volume2', 'volume3-backup2']
Expunged:['volume3', 'image1']
Ha:['vm1']
Group:
vm_backup1:['vm2-backup1']---vm2@
''' | apache-2.0 | 3,537,558,680,640,688,000 | 38.7 | 104 | 0.691145 | false |
britcey/ansible | lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py | 36 | 27324 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_securitygroup
version_added: "2.1"
short_description: Manage Azure network security groups.
description:
- Create, update or delete a network security group. A security group contains Access Control List (ACL) rules
that allow or deny network traffic to subnets or individual network interfaces. A security group is created
with a set of default security rules and an empty set of security rules. Shape traffic flow by adding
rules to the empty set of security rules.
options:
default_rules:
description:
- The set of default rules automatically added to a security group at creation. In general default
rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC. See
rules below for the makeup of a rule dict.
required: false
default: null
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- Name of the security group to operate on.
required: false
default: null
purge_default_rules:
description:
- Remove any existing rules not matching those defined in the default_rules parameter.
default: false
required: false
purge_rules:
description:
- Remove any existing rules not matching those defined in the rules parameters.
default: false
required: false
resource_group:
description:
- Name of the resource group the security group belongs to.
required: true
rules:
description:
- Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary.
required: false
default: null
suboptions:
name:
description:
- Unique name for the rule.
required: true
description:
description:
- Short description of the rule's purpose.
protocol:
description: Accepted traffic protocol.
choices:
- Udp
- Tcp
- "*"
default: "*"
source_port_range:
description:
- Port or range of ports from which traffic originates.
default: "*"
destination_port_range:
description:
- Port or range of ports to which traffic is headed.
default: "*"
source_address_prefix:
description:
- IP address or CIDR from which traffic originates.
default: "*"
destination_address_prefix:
description:
- IP address or CIDR to which traffic is headed.
default: "*"
access:
description:
- Whether or not to allow the traffic flow.
choices:
- Allow
- Deny
default: Allow
priority:
description:
                - Order in which to apply the rule. Must be a unique integer between 100 and 4096 inclusive.
required: true
direction:
description:
- Indicates the direction of the traffic flow.
choices:
- Inbound
- Outbound
default: Inbound
state:
description:
- Assert the state of the security group. Set to 'present' to create or update a security group. Set to
'absent' to remove a security group.
default: present
required: false
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
# Create a security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
purge_rules: yes
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22
access: Deny
priority: 100
direction: Inbound
- name: 'AllowSSH'
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22
access: Allow
priority: 101
direction: Inbound
# Update rules on existing security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22-23
access: Deny
priority: 100
direction: Inbound
- name: AllowSSHFromHome
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22-23
access: Allow
priority: 102
direction: Inbound
tags:
testing: testing
delete: on-exit
# Delete security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
state: absent
'''
RETURN = '''
state:
description: Current state of the security group.
returned: always
type: dict
sample: {
"default_rules": [
{
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "AzureLoadBalancer",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all inbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destination_address_prefix": "Internet",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all outbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
}
],
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup",
"location": "westus",
"name": "mysecgroup",
"network_interfaces": [],
"rules": [
{
"access": "Deny",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
"name": "DenySSH",
"priority": 100,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
"name": "AllowSSH",
"priority": 101,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "174.109.158.0/24",
"source_port_range": "*"
}
],
"subnets": [],
"tags": {
"delete": "on-exit",
"foo": "bar",
"testing": "testing"
},
"type": "Microsoft.Network/networkSecurityGroups"
}
''' # NOQA
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule
from azure.mgmt.network.models.network_management_client_enums import (SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol)
except ImportError:
# This is handled in azure_rm_common
pass
def validate_rule(rule, rule_type=None):
'''
Apply defaults to a rule dictionary and check that all values are valid.
:param rule: rule dict
:param rule_type: Set to 'default' if the rule is part of the default set of rules.
:return: None
'''
if not rule.get('name'):
raise Exception("Rule name value is required.")
priority = rule.get('priority', None)
if not priority:
raise Exception("Rule priority is required.")
if not isinstance(priority, (int, long)):
raise Exception("Rule priority attribute must be an integer.")
if rule_type != 'default' and (priority < 100 or priority > 4096):
raise Exception("Rule priority must be between 100 and 4096")
if not rule.get('access'):
rule['access'] = 'Allow'
access_names = [member.value for member in SecurityRuleAccess]
if rule['access'] not in access_names:
raise Exception("Rule access must be one of [{0}]".format(', '.join(access_names)))
if not rule.get('destination_address_prefix'):
rule['destination_address_prefix'] = '*'
if not rule.get('source_address_prefix'):
rule['source_address_prefix'] = '*'
if not rule.get('protocol'):
rule['protocol'] = '*'
protocol_names = [member.value for member in SecurityRuleProtocol]
if rule['protocol'] not in protocol_names:
raise Exception("Rule protocol must be one of [{0}]".format(', '.join(protocol_names)))
if not rule.get('direction'):
rule['direction'] = 'Inbound'
direction_names = [member.value for member in SecurityRuleDirection]
if rule['direction'] not in direction_names:
raise Exception("Rule direction must be one of [{0}]".format(', '.join(direction_names)))
if not rule.get('source_port_range'):
rule['source_port_range'] = '*'
if not rule.get('destination_port_range'):
rule['destination_port_range'] = '*'
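# Illustrative only (rule values are made up): validate_rule() fills in the
# documented defaults, so a minimal dict grows the remaining fields in place:
#
#     rule = {'name': 'AllowSSH', 'priority': 101}
#     validate_rule(rule)
#     # rule now also has access='Allow', protocol='*', direction='Inbound',
#     # and '*' for both address prefixes and both port ranges.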
def compare_rules(r, rule):
matched = False
changed = False
if r['name'] == rule['name']:
matched = True
if rule.get('description', None) != r['description']:
changed = True
r['description'] = rule['description']
if rule['protocol'] != r['protocol']:
changed = True
r['protocol'] = rule['protocol']
if rule['source_port_range'] != r['source_port_range']:
changed = True
r['source_port_range'] = rule['source_port_range']
if rule['destination_port_range'] != r['destination_port_range']:
changed = True
r['destination_port_range'] = rule['destination_port_range']
if rule['access'] != r['access']:
changed = True
r['access'] = rule['access']
if rule['priority'] != r['priority']:
changed = True
r['priority'] = rule['priority']
if rule['direction'] != r['direction']:
changed = True
r['direction'] = rule['direction']
return matched, changed
def create_rule_instance(rule):
'''
Create an instance of SecurityRule from a dict.
:param rule: dict
:return: SecurityRule
'''
return SecurityRule(
rule['protocol'],
rule['source_address_prefix'],
rule['destination_address_prefix'],
rule['access'],
rule['direction'],
id=rule.get('id', None),
description=rule.get('description', None),
source_port_range=rule.get('source_port_range', None),
destination_port_range=rule.get('destination_port_range', None),
priority=rule.get('priority', None),
provisioning_state=rule.get('provisioning_state', None),
name=rule.get('name', None),
etag=rule.get('etag', None)
)
def create_rule_dict_from_obj(rule):
'''
Create a dict from an instance of a SecurityRule.
:param rule: SecurityRule
:return: dict
'''
return dict(
id=rule.id,
name=rule.name,
description=rule.description,
protocol=rule.protocol,
source_port_range=rule.source_port_range,
destination_port_range=rule.destination_port_range,
source_address_prefix=rule.source_address_prefix,
destination_address_prefix=rule.destination_address_prefix,
access=rule.access,
priority=rule.priority,
direction=rule.direction,
provisioning_state=rule.provisioning_state,
etag=rule.etag
)
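# Sketch with hypothetical values: for the fields shown, these two helpers
# round-trip a rule dict through a SecurityRule instance and back:
#
#     rule = {'name': 'AllowSSH', 'priority': 101, 'access': 'Allow',
#             'protocol': 'Tcp', 'direction': 'Inbound',
#             'source_address_prefix': '*', 'destination_address_prefix': '*',
#             'source_port_range': '*', 'destination_port_range': '22'}
#     create_rule_dict_from_obj(create_rule_instance(rule))['priority']  # 101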
def create_network_security_group_dict(nsg):
results = dict(
id=nsg.id,
name=nsg.name,
type=nsg.type,
location=nsg.location,
tags=nsg.tags,
)
results['rules'] = []
if nsg.security_rules:
for rule in nsg.security_rules:
results['rules'].append(create_rule_dict_from_obj(rule))
results['default_rules'] = []
if nsg.default_security_rules:
for rule in nsg.default_security_rules:
results['default_rules'].append(create_rule_dict_from_obj(rule))
results['network_interfaces'] = []
if nsg.network_interfaces:
for interface in nsg.network_interfaces:
results['network_interfaces'].append(interface.id)
results['subnets'] = []
if nsg.subnets:
for subnet in nsg.subnets:
results['subnets'].append(subnet.id)
return results
class AzureRMSecurityGroup(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
default_rules=dict(type='list'),
location=dict(type='str'),
name=dict(type='str', required=True),
purge_default_rules=dict(type='bool', default=False),
purge_rules=dict(type='bool', default=False),
resource_group=dict(required=True, type='str'),
rules=dict(type='list'),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
self.default_rules = None
self.location = None
self.name = None
self.purge_default_rules = None
self.purge_rules = None
self.resource_group = None
self.rules = None
self.state = None
self.tags = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
changed = False
results = dict()
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.rules:
for rule in self.rules:
try:
validate_rule(rule)
except Exception as exc:
self.fail("Error validating rule {0} - {1}".format(rule, str(exc)))
if self.default_rules:
for rule in self.default_rules:
try:
validate_rule(rule, 'default')
except Exception as exc:
self.fail("Error validating default rule {0} - {1}".format(rule, str(exc)))
try:
nsg = self.network_client.network_security_groups.get(self.resource_group, self.name)
results = create_network_security_group_dict(nsg)
self.log("Found security group:")
self.log(results, pretty_print=True)
self.check_provisioning_state(nsg, self.state)
if self.state == 'present':
pass
elif self.state == 'absent':
self.log("CHANGED: security group found but state is 'absent'")
changed = True
except CloudError:
if self.state == 'present':
self.log("CHANGED: security group not found and state is 'present'")
changed = True
if self.state == 'present' and not changed:
# update the security group
self.log("Update security group {0}".format(self.name))
if self.rules:
for rule in self.rules:
rule_matched = False
for r in results['rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['rules'].append(rule)
if self.purge_rules:
new_rules = []
for rule in results['rules']:
for r in self.rules:
if rule['name'] == r['name']:
new_rules.append(rule)
results['rules'] = new_rules
if self.default_rules:
for rule in self.default_rules:
rule_matched = False
for r in results['default_rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['default_rules'].append(rule)
if self.purge_default_rules:
new_default_rules = []
for rule in results['default_rules']:
for r in self.default_rules:
if rule['name'] == r['name']:
new_default_rules.append(rule)
results['default_rules'] = new_default_rules
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'present' and changed:
# create the security group
self.log("Create security group {0}".format(self.name))
if not self.location:
self.fail("Parameter error: location required when creating a security group.")
results['name'] = self.name
results['location'] = self.location
results['rules'] = []
results['default_rules'] = []
results['tags'] = {}
if self.rules:
results['rules'] = self.rules
if self.default_rules:
results['default_rules'] = self.default_rules
if self.tags:
results['tags'] = self.tags
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'absent' and changed:
self.log("Delete security group {0}".format(self.name))
self.results['changed'] = changed
self.results['state'] = dict()
if not self.check_mode:
self.delete()
                # The delete call does not return anything; if no exception
                # was raised, assume it worked.
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update(self, results):
parameters = NetworkSecurityGroup()
if results.get('rules'):
parameters.security_rules = []
for rule in results.get('rules'):
parameters.security_rules.append(create_rule_instance(rule))
if results.get('default_rules'):
parameters.default_security_rules = []
for rule in results.get('default_rules'):
parameters.default_security_rules.append(create_rule_instance(rule))
parameters.tags = results.get('tags')
parameters.location = results.get('location')
try:
poller = self.network_client.network_security_groups.create_or_update(self.resource_group,
self.name,
parameters)
result = self.get_poller_result(poller)
except AzureHttpError as exc:
self.fail("Error creating/upating security group {0} - {1}".format(self.name, str(exc)))
return create_network_security_group_dict(result)
def delete(self):
try:
poller = self.network_client.network_security_groups.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except AzureHttpError as exc:
raise Exception("Error deleting security group {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMSecurityGroup()
if __name__ == '__main__':
main()
| gpl-3.0 | -218,122,055,015,500,740 | 36.95 | 211 | 0.549188 | false |
fastavro/fastavro | fastavro/_validation_py.py | 1 | 11047 | import array
import numbers
from collections.abc import Mapping, Sequence
from fastavro.const import INT_MAX_VALUE, INT_MIN_VALUE, LONG_MAX_VALUE, LONG_MIN_VALUE
from ._validate_common import ValidationError, ValidationErrorData
from .schema import extract_record_type, extract_logical_type, schema_name, parse_schema
from .logical_writers import LOGICAL_WRITERS
from ._schema_common import UnknownType
def validate_null(datum, **kwargs):
"""
Checks that the data value is None.
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return datum is None
def validate_boolean(datum, **kwargs):
"""
    Check that the data value is a bool instance.
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, bool)
def validate_string(datum, **kwargs):
"""
    Check that the data value is a string.
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, str)
def validate_bytes(datum, **kwargs):
"""
    Check that the data value is a Python bytes or bytearray instance.
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, (bytes, bytearray))
def validate_int(datum, **kwargs):
"""
    Check that the data value is a non-floating-point
    number that fits in an Int32:
    -2147483648 <= datum <= 2147483647.
    Accepted Python types: int, numbers.Integral
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return (
isinstance(datum, (int, numbers.Integral))
and INT_MIN_VALUE <= datum <= INT_MAX_VALUE
and not isinstance(datum, bool)
)
def validate_long(datum, **kwargs):
"""
    Check that the data value is a non-floating-point
    number that fits in an Int64:
    -9223372036854775808 <= datum <= 9223372036854775807.
    Accepted Python types: int, numbers.Integral
    Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return (
isinstance(datum, (int, numbers.Integral))
and LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE
and not isinstance(datum, bool)
)
def validate_float(datum, **kwargs):
"""
    Check that the data value is a single- or double-precision
    floating point number.
    Accepted Python types: int, float, numbers.Real
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, (int, float, numbers.Real)) and not isinstance(datum, bool)
def validate_fixed(datum, schema, **kwargs):
"""
    Check that the data value is fixed-width bytes,
    matching schema['size'] exactly.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
kwargs: Any
Unused kwargs
"""
return isinstance(datum, bytes) and len(datum) == schema["size"]
def validate_enum(datum, schema, **kwargs):
"""
    Check that the data value matches one of the enum symbols,
    e.g. "blue" in ["red", "green", "blue"]
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
kwargs: Any
Unused kwargs
"""
return datum in schema["symbols"]
def validate_array(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
Check that the data list values all match schema['items'].
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
return (
isinstance(datum, (Sequence, array.array))
and not isinstance(datum, str)
and all(
_validate(
datum=d,
schema=schema["items"],
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
for d in datum
)
)
def validate_map(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
    Check that the data is a mapping whose keys are strings and
    whose values all match schema['values'].
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
return (
isinstance(datum, Mapping)
and all(isinstance(k, str) for k in datum)
and all(
_validate(
datum=v,
schema=schema["values"],
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
for v in datum.values()
)
)
def validate_record(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
    Check that the data is a Mapping whose schema-defined fields
    all validate successfully.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
_, fullname = schema_name(schema, parent_ns)
return (
isinstance(datum, Mapping)
and not ("-type" in datum and datum["-type"] != fullname)
and all(
_validate(
datum=datum.get(f["name"], f.get("default")),
schema=f["type"],
named_schemas=named_schemas,
field=f"{fullname}.{f['name']}",
raise_errors=raise_errors,
)
for f in schema["fields"]
)
)
def validate_union(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
    Check that the datum validates against at least one of the
    union's member schemas.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
if isinstance(datum, tuple):
(name, datum) = datum
for candidate in schema:
            if extract_record_type(candidate) == "record":
                candidate_name = candidate["name"]
            else:
                candidate_name = candidate
            if candidate_name == name:
return _validate(
datum,
schema=candidate,
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
else:
return False
errors = []
for s in schema:
try:
ret = _validate(
datum,
schema=s,
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
if ret:
# We exit on the first passing type in Unions
return True
except ValidationError as e:
errors.extend(e.errors)
if raise_errors:
raise ValidationError(*errors)
return False
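# Example of the explicit union-branch selection handled by validate_union
# above (an editorial sketch, not part of the original module): a
# (name, datum) tuple validates only against the named member schema.
#
#     schema = ["null", {"type": "record", "name": "Rec",
#                        "fields": [{"name": "x", "type": "int"}]}]
#     validate(("Rec", {"x": 1}), schema)  # checked against "Rec" only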
VALIDATORS = {
"null": validate_null,
"boolean": validate_boolean,
"string": validate_string,
"int": validate_int,
"long": validate_long,
"float": validate_float,
"double": validate_float,
"bytes": validate_bytes,
"fixed": validate_fixed,
"enum": validate_enum,
"array": validate_array,
"map": validate_map,
"union": validate_union,
"error_union": validate_union,
"record": validate_record,
"error": validate_record,
"request": validate_record,
}
def _validate(datum, schema, named_schemas, field=None, raise_errors=True):
# This function expects the schema to already be parsed
record_type = extract_record_type(schema)
result = None
logical_type = extract_logical_type(schema)
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
validator = VALIDATORS.get(record_type)
if validator:
result = validator(
datum,
schema=schema,
named_schemas=named_schemas,
parent_ns=field,
raise_errors=raise_errors,
)
elif record_type in named_schemas:
result = _validate(
datum,
schema=named_schemas[record_type],
named_schemas=named_schemas,
field=field,
raise_errors=raise_errors,
)
else:
raise UnknownType(record_type)
if raise_errors and result is False:
raise ValidationError(ValidationErrorData(datum, schema, field))
return result
def validate(datum, schema, field=None, raise_errors=True):
"""
Determine if a python datum is an instance of a schema.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
field: str, optional
Record field being validated
raise_errors: bool, optional
If true, errors are raised for invalid data. If false, a simple
True (valid) or False (invalid) result is returned
Example::
from fastavro.validation import validate
schema = {...}
record = {...}
validate(record, schema)
"""
named_schemas = {}
parsed_schema = parse_schema(schema, named_schemas)
return _validate(datum, parsed_schema, named_schemas, field, raise_errors)
def validate_many(records, schema, raise_errors=True):
"""
Validate a list of data!
Parameters
----------
records: iterable
List of records to validate
schema: dict
Schema
raise_errors: bool, optional
If true, errors are raised for invalid data. If false, a simple
True (valid) or False (invalid) result is returned
Example::
from fastavro.validation import validate_many
schema = {...}
records = [{...}, {...}, ...]
validate_many(records, schema)
"""
named_schemas = {}
parsed_schema = parse_schema(schema, named_schemas)
errors = []
results = []
for record in records:
try:
results.append(
_validate(
record, parsed_schema, named_schemas, raise_errors=raise_errors
)
)
except ValidationError as e:
errors.extend(e.errors)
if raise_errors and errors:
raise ValidationError(*errors)
return all(results)
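if __name__ == "__main__":
    # Editorial demo (not part of the original module): validate records
    # against a trivial record schema, one at a time and in bulk.
    _point_schema = {
        "type": "record",
        "name": "Point",
        "fields": [
            {"name": "x", "type": "int"},
            {"name": "y", "type": "int"},
        ],
    }
    print(validate({"x": 1, "y": 2}, _point_schema))  # True
    print(validate_many([{"x": 1, "y": 2}, {"x": 3, "y": 4}], _point_schema))  # True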
| mit | 2,820,898,320,142,424,600 | 24.049887 | 88 | 0.576718 | false |
jpurma/Kataja | kataja/saved/DerivationTree.py | 1 | 9184 | # coding=utf-8
# ############################################################################
#
# *** Kataja - Biolinguistic Visualization tool ***
#
# Copyright 2013 Jukka Purma
#
# This file is part of Kataja.
#
# Kataja is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kataja is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kataja. If not, see <http://www.gnu.org/licenses/>.
#
# ############################################################################
from kataja.SavedField import SavedField
from kataja.SavedObject import SavedObject
from kataja.saved.DerivationStep import DerivationStep
from kataja.singletons import log, ctrl
from kataja.syntactic_state_to_nodes import syntactic_state_to_nodes
from kataja.syntax.SyntaxState import SyntaxState
from collections import defaultdict
DONE_SUCCESS = 7
class DerivationTree(SavedObject):
""" Stores derivation steps for one forest and takes care of related
logic """
def __init__(self, forest=None):
super().__init__()
self.forest = forest
self.d = {}
self.branches = [] # state_id:s, last step of the branch
self.branch = [] # state_id:s
self.current_step_index = 0
self.current_step_id = 0
self.current_branch_id = 0 # state_id
self.current_branch_index = 0
self.child_map = defaultdict(list)
def add_step(self, d_step: SyntaxState or DerivationStep):
""" Store given syntactic state as a derivation step. Forest can switch which derivation
state it is currently displaying.
:param d_step: SyntaxState or DerivationStep object
:return:
"""
if isinstance(d_step, SyntaxState):
d_step = DerivationStep(d_step)
self.save_derivation_step(d_step)
def save_derivation_step(self, derivation_step: DerivationStep):
if not derivation_step.frozen:
derivation_step.freeze()
self.d[derivation_step.state_id] = derivation_step.frozen
def build_active_branch(self):
self.branch = self.build_branch(self.current_branch_id)
def build_branch(self, branch_id):
b = []
step = self.d.get(branch_id, None)
done = set()
while step:
uid, data, msg, state_id, parent_id, state_type, sort_order = step
b.append(state_id)
if parent_id in done:
print('looping branch, at parent ', parent_id)
break
step = self.d.get(parent_id, None)
done.add(parent_id)
return list(reversed(b))
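    # Editorial sketch of build_branch above: with derivation steps whose
    # parent links form 'a' <- 'b' <- 'c', build_branch('c') walks the
    # parent_id chain from the leaf and returns ['a', 'b', 'c'].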
def collect_states(self):
states = {}
for key, val in self.d.items():
            state_key = int(key.rsplit('_', 1)[-1])
            if state_key not in states:
                uid, data, msg, state_id, parent_id, state_type, sort_order = val
                states[state_key] = msg, state_type
return states
def build_branches(self):
parents = {parent_id for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values()}
sortable_branches = [(sort_order, state_id) for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values() if state_id not in parents]
sortable_branches.sort()
self.branches = [state_id for sort_order, state_id in sortable_branches]
def build_child_map(self):
self.child_map = defaultdict(list)
sortable_values = [(sort_order, state_id, parent_id) for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values() if parent_id]
sortable_values.sort()
for sort_order, state_id, parent_id in sortable_values:
self.child_map[parent_id].append(state_id)
def iterate_branch(self, branch_id):
step = self.d.get(branch_id, None)
while step:
uid, data, msg, state_id, parent_id, state_type, sort_order = step
yield state_id
step = self.d.get(parent_id, None)
def get_roots(self):
return [state_id for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values() if not parent_id]
def update_dimensions(self):
self.build_branches()
self.build_child_map()
# @time_me
def restore_derivation_step(self):
d_step = self.get_derivation_step_by_id(self.current_step_id)
if d_step:
syntactic_state_to_nodes(self.forest, d_step.to_syn_state())
if d_step.msg:
log.info(f'<b>msg: {d_step.msg}</b>')
for log_msg in d_step.log:
if log_msg.strip():
log_msg = log_msg.replace("\t", " ")
log.info(f'<font color="#859900">{log_msg}</font>')
ctrl.main.parse_changed.emit()
def get_derivation_step_by_index(self, index):
state_id = self.branch[index]
return self.get_derivation_step_by_id(state_id)
def get_derivation_step_by_id(self, state_id):
uid, frozen_data, msg, state_id, parent_id, state_type, sort_order = self.d[state_id]
d_step = DerivationStep(None, uid=uid)
d_step.load_objects(frozen_data)
return d_step
def _find_branch_for(self, state_id):
for i, branch in enumerate(self.branches):
for step_id in self.iterate_branch(branch):
if step_id == state_id:
return branch
return 0
def jump_to_derivation_step_by_id(self, state_id):
self.current_step_id = state_id
if state_id in self.branch:
self.current_step_index = self.branch.index(state_id)
else:
self.current_branch_id = self._find_branch_for(state_id)
self.current_branch_index = self.branches.index(self.current_branch_id)
self.build_active_branch()
self.current_step_index = self.branch.index(state_id)
self.restore_derivation_step()
def next_derivation_step(self):
"""
:return:
"""
if self.current_step_index + 1 < len(self.branch):
self.current_step_index += 1
else:
self.current_step_index = 0
self.current_step_id = self.branch[self.current_step_index]
self.restore_derivation_step()
def previous_derivation_step(self):
"""
:return:
"""
if self.current_step_index > 0:
self.current_step_index -= 1
else:
self.current_step_index = len(self.branch) - 1
self.current_step_id = self.branch[self.current_step_index]
self.restore_derivation_step()
def jump_to_derivation_step(self, i):
"""
:return:
"""
self.current_step_index = i
self.current_step_id = self.branch[self.current_step_index]
self.restore_derivation_step()
def jump_to_first_step(self):
self.jump_to_derivation_step(0)
def jump_to_last_step(self):
self.jump_to_derivation_step(len(self.branch) - 1)
def next_parse(self):
if self.current_branch_index + 1 < len(self.branches):
self.current_branch_index += 1
else:
self.current_branch_index = 0
self.show_parse(self.current_branch_index)
def previous_parse(self):
if self.current_branch_index > 0:
self.current_branch_index -= 1
else:
self.current_branch_index = len(self.branches) - 1
self.show_parse(self.current_branch_index)
def show_parse(self, parse_index):
if self.branches:
self.current_branch_id = self.branches[parse_index]
self.build_active_branch()
self.jump_to_last_step()
ctrl.main.parse_changed.emit()
def show_first_passing_parse(self):
passing = []
for i, branch in enumerate(self.branches):
step = self.d.get(branch, None)
if step:
uid, data, msg, state_id, parent_id, state_type, sort_order = step
if state_type == DONE_SUCCESS:
passing.append((sort_order, i))
i = 0
if passing:
passing.sort()
sort_order, i = passing[0]
self.current_branch_index = i
self.show_parse(i)
# ############## #
# #
# Save support #
# #
# ############## #
forest = SavedField("forest")
d = SavedField("d")
branches = SavedField("branches")
branch = SavedField("branch")
current_step_index = SavedField("current_step_index")
current_step_id = SavedField("current_step_id")
current_branch_index = SavedField("current_branch_index")
current_branch_id = SavedField("current_branch_id")
| gpl-3.0 | 669,036,241,590,469,200 | 36.182186 | 162 | 0.59375 | false |
bxshi/gem5 | src/arch/x86/isa/insts/x87/arithmetic/change_sign.py | 70 | 2266 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop FABS {
absfp st(0), st(0), SetStatus=True
};
def macroop FCHS {
chsfp st(0), st(0), SetStatus=True
};
'''
| bsd-3-clause | -1,265,562,859,332,499,700 | 47.212766 | 72 | 0.784643 | false |
tedi3231/openerp | build/lib/openerp/addons/account/project/project.py | 38 | 2477 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
_name = 'account.analytic.journal'
_description = 'Analytic Journal'
_columns = {
'name': fields.char('Journal Name', size=64, required=True),
'code': fields.char('Journal Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', size=32, required=True, help="Gives the type of the analytic journal. When it needs for a document (eg: an invoice) to create analytic entries, OpenERP will look for a matching journal of the same type."),
'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': True,
'type': 'general',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
account_analytic_journal()
class account_journal(osv.osv):
_inherit="account.journal"
_columns = {
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
}
account_journal()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,591,497,084,212,470,000 | 46.634615 | 357 | 0.638272 | false |
zhanghenry/stocks | django/conf/locale/ml/formats.py | 394 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause | 1,410,541,377,115,901,700 | 41.209302 | 81 | 0.516253 | false |
Luckyseal/wechatpy | wechatpy/client/api/device.py | 8 | 8280 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
import urllib
from wechatpy.utils import to_text, to_binary
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatDevice(BaseWeChatAPI):
API_BASE_URL = 'https://api.weixin.qq.com/device/'
def send_message(self, device_type, device_id, user_id, content):
"""
        Proactively send a message to a device.
        For details see
        http://iot.weixin.qq.com/document-2_3.html
        :param device_type: Device type; currently the official account's original ID
        :param device_id: Device ID
        :param user_id: openid of the WeChat user account
        :param content: Message content; it will be BASE64 encoded
        :return: Returned JSON data
"""
content = to_text(base64.b64encode(to_binary(content)))
return self._post(
'transmsg',
data={
'device_type': device_type,
'device_id': device_id,
'openid': user_id,
'content': content
}
)
def create_qrcode(self, device_ids):
"""
        Get device QR codes.
        For details see
        http://iot.weixin.qq.com/document-2_5.html
        :param device_ids: List of device ids
        :return: Returned JSON data
"""
return self._post(
'create_qrcode',
data={
'device_num': len(device_ids),
'device_id_list': device_ids
}
)
def get_qrcode_url(self, ticket, data=None):
"""
        Exchange a ticket for a QR code URL.
        For details see
        http://iot.weixin.qq.com/document-2_5.html
        :param ticket: QR code ticket
        :param data: Extra data
        :return: QR code URL
"""
url = 'http://we.qq.com/d/{ticket}'.format(ticket=ticket)
if data:
if isinstance(data, (dict, tuple, list)):
data = urllib.urlencode(data)
data = to_text(base64.b64encode(to_binary(data)))
url = '{base}#{data}'.format(base=url, data=data)
return url
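    # Illustrative result of get_qrcode_url above (editorial sketch with a
    # made-up ticket): get_qrcode_url('gQH...') returns
    # 'http://we.qq.com/d/gQH...', and passing data appends
    # '#<base64 of the urlencoded data>' to that URL.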
def bind(self, ticket, device_id, user_id):
"""
        Bind a device.
        For details see
        http://iot.weixin.qq.com/document-2_12.html
        :param ticket: Credential proving the bind operation is legitimate
            (generated by the WeChat backend; a third-party H5 page obtains
            it through the client-side jsapi)
        :param device_id: Device id
        :param user_id: openid of the user
        :return: Returned JSON data
"""
return self._post(
'bind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
)
def unbind(self, ticket, device_id, user_id):
"""
        Unbind a device.
        For details see
        http://iot.weixin.qq.com/document-2_12.html
        :param ticket: Credential proving the unbind operation is legitimate
            (generated by the WeChat backend; a third-party H5 page obtains
            it through the client-side jsapi)
        :param device_id: Device id
        :param user_id: openid of the user
        :return: Returned JSON data
"""
return self._post(
'unbind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
)
def compel_bind(self, device_id, user_id):
"""
        Force-bind a user and a device.
        For details see
        http://iot.weixin.qq.com/document-2_12.html
        :param device_id: Device id
        :param user_id: openid of the user
        :return: Returned JSON data
"""
return self._post(
'compel_bind',
data={
'device_id': device_id,
'openid': user_id
}
)
force_bind = compel_bind
def compel_unbind(self, device_id, user_id):
"""
        Force-unbind a user and a device.
        For details see
        http://iot.weixin.qq.com/document-2_12.html
        :param device_id: Device id
        :param user_id: openid of the user
        :return: Returned JSON data
"""
return self._post(
'compel_unbind',
data={
'device_id': device_id,
'openid': user_id
}
)
force_unbind = compel_unbind
def get_stat(self, device_id):
"""
        Query device status.
        For details see
        http://iot.weixin.qq.com/document-2_7.html
        :param device_id: Device id
        :return: Returned JSON data
"""
return self._post(
'get_stat',
data={'device_id': device_id}
)
def verify_qrcode(self, ticket):
"""
        Verify a QR code.
        For details see
        http://iot.weixin.qq.com/document-2_9.html
        :param ticket: Ticket of the device QR code
        :return: Returned JSON data
"""
return self._post(
'verify_qrcode',
data={'ticket': ticket}
)
def get_user_id(self, device_type, device_id):
"""
        Get the openid bound to a device.
        For details see
        http://iot.weixin.qq.com/document-2_4.html
        :param device_type: Device type; currently the official account's original ID
        :param device_id: Device id
        :return: Returned JSON data
"""
return self._post(
'get_openid',
data={
'device_type': device_type,
'device_id': device_id
}
)
get_open_id = get_user_id
def get_binded_devices(self, user_id):
"""
        Get, by openid, the list of device ids the user has bound
        under the current device type.
        For details see
        http://iot.weixin.qq.com/document-2_13.html
        :param user_id: openid of the user to query
        :return: Returned JSON data
"""
return self._post(
'get_bind_device',
data={'openid': user_id}
)
get_bind_device = get_binded_devices
def send_status_message(self, device_type, device_id, user_id, status):
"""
        Proactively send a device status message to the WeChat client.
        For details see
        http://iot.weixin.qq.com/document-2_10.html
        :param device_type: Device type; currently the official account's original ID
        :param device_id: Device ID
        :param user_id: openid of the WeChat user account
        :param status: Device status: 0 -- not connected, 1 -- connected
        :return: Returned JSON data
"""
return self._post(
'transmsg',
data={
'device_type': device_type,
'device_id': device_id,
'open_id': user_id,
'device_status': status
}
)
def authorize(self, devices, op_type=0):
"""
        Authorize devices.
        For details see
        http://iot.weixin.qq.com/document-2_6.html
        :param devices: List of device info dicts
        :param op_type: Type of the requested operation; restricted to
            0: device authorization, 1: device update
        :return: Returned JSON data
"""
return self._post(
'authorize',
data={
'device_num': len(devices),
'device_list': devices,
'op_type': op_type
}
)
def get_qrcode(self):
"""
        Get a device id and QR code.
        For details see
        http://iot.weixin.qq.com/document-2_11.html
        :return: Returned JSON data
"""
return self._get('getqrcode')
def authorize_device(self, devices, op_type=1):
"""
        Authorize devices.
        For details see
        http://iot.weixin.qq.com/document-2_6.html
        :param devices: List of device info dicts
        :param op_type: Type of the requested operation; restricted to
            0: device authorization, 1: device update
        :return: Returned JSON data
"""
return self._post(
'authorize_device',
data={
'device_num': len(devices),
'device_list': devices,
'op_type': op_type
}
)
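# Editorial usage sketch (not part of the original module). How a client
# instance is wired to this API is an assumption here; the calls below only
# mirror the method signatures defined above:
#
#     device_api = WeChatDevice(client)   # hypothetical client wiring
#     device_api.send_message('gh_1234567890ab', 'dev1', 'OPENID', 'ping')
#     device_api.get_stat('dev1')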
| mit | 1,080,269,611,498,647,300 | 24.352113 | 75 | 0.490556 | false |
fuhongliang/odoo | addons/document/__init__.py | 434 | 1128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,037,525,495,609,306,000 | 39.285714 | 78 | 0.628546 | false |
BhallaLab/moose | moose-examples/paper-2015/Fig2_elecModels/Fig2D.py | 2 | 3872 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
# This example illustrates loading a model from an SWC file, inserting
# spines, and viewing it.
try:
import moogli
except Exception:
print( "[INFO ] Could not import moogli. Quitting..." )
quit()
import moose
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
import rdesigneur as rd
PI = 3.14159265358979
frameRunTime = 0.0001
runtime = 0.1
inject = 15e-10
simdt = 5e-5
RM = 1.0
RA = 1.0
CM = 0.01
# This is the expression used to set spine spacing:
spineSpacing = "dia * 2"
minSpacing = 0.1e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle = 0
spineAngleDistrib = 2*PI
def create_vm_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path,
vertices=10)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.5,
1.0,
1.0),
moogli.colors.Color(1.0,
0.0,
0.0,
0.9)])
mapper = moogli.utilities.mapper(colormap, normalizer)
vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
network.set("color", vms, mapper)
def interlude(view):
moose.start(frameRunTime)
#vms = [moose.element(x).Vm for x in network.shapes.keys()]
#network.set("color", vms, mapper)
view.pitch(0.01)
currTime = moose.element('/clock').currentTime
if currTime >= runtime:
view.stop()
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(list(network.shapes.values()))
view = moogli.View("vm-view",
interlude=interlude)
viewer.attach_view(view)
return viewer
def main():
######## Put your favourite cell model here ######
##This one is from PMID 19146814: Peng et al Neuron 2009
filename = 'cells/K-18.CNG.swc'
moose.Neutral( '/library' )
rdes = rd.rdesigneur( \
cellProto = [[ filename, 'elec' ] ],\
spineProto = [['makeSpineProto()', 'spine' ]] ,\
spineDistrib = [ \
['spine', '#', \
'spacing', spineSpacing, \
'spacingDistrib', str( minSpacing ), \
'angle', str( spineAngle ), \
'angleDistrib', str( spineAngleDistrib ), \
'size', str( spineSize ), \
'sizeDistrib', str( spineSizeDistrib ) ] \
] \
)
rdes.buildModel('/model')
moose.reinit()
compts = moose.wildcardFind( "/model/elec/#[ISA=CompartmentBase]" )
compts[0].inject = inject
################## Now we set up the display ########################
print("Setting Up 3D Display")
app = QtGui.QApplication(sys.argv)
vm_viewer = create_vm_viewer(rdes)
vm_viewer.showMaximized()
vm_viewer.start()
return app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,466,346,624,588,140,800 | 34.522936 | 73 | 0.490961 | false |
AlphaCluster/NewsBlur | apps/rss_feeds/migrations/0039_feedicon.py | 18 | 6742 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeedIcon'
db.create_table('rss_feeds_feedicon', (
('feed', self.gf('utils.fields.AutoOneToOneField')(related_name='icon', unique=True, primary_key=True, to=orm['rss_feeds.Feed'])),
('color', self.gf('django.db.models.fields.CharField')(max_length=6, null=True, blank=True)),
('data', self.gf('django.db.models.fields.TextField')()),
('icon_url', self.gf('django.db.models.fields.CharField')(max_length=2000, null=True, blank=True)),
('not_found', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('rss_feeds', ['FeedIcon'])
def backwards(self, orm):
# Deleting model 'FeedIcon'
db.delete_table('rss_feeds_feedicon')
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedicon': {
'Meta': {'object_name': 'FeedIcon'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {}),
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'icon'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['rss_feeds.Feed']"}),
'icon_url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['rss_feeds']
| mit | -2,100,673,499,900,515,800 | 69.229167 | 159 | 0.554583 | false |
ebaskoro/node-gyp | gyp/test/actions/gyptest-default.py | 243 | 2407 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
# Test that an "always run" action increases a counter on multiple
# invocations, and that a dependent action updates in step.
test.build('actions.gyp', chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
test.build('actions.gyp', chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# The "always run" action only counts to 2, but the dependent target
# will count forever if it's allowed to run. This verifies that the
# dependent target only runs when the "always run" action generates
# new output, not just because the "always run" ran.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
test.pass_test()
| mit | 1,248,392,624,794,297,600 | 33.884058 | 79 | 0.727046 | false |
pudo/aleph | aleph/migrate/versions/666668eae682_refactor_alerts.py | 5 | 1856 | """refactor alerts
Revision ID: 666668eae682
Revises: 8526f853643a
Create Date: 2016-05-05 16:46:05.656646
"""
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '666668eae682'
down_revision = '8526f853643a'
def upgrade():
op.add_column('alert', sa.Column('entity_id', sa.String(length=32),
nullable=True))
op.add_column('alert', sa.Column('query_text', sa.Unicode(), nullable=True))
op.create_foreign_key(None, 'alert', 'entity', ['entity_id'], ['id'])
bind = op.get_bind()
meta = sa.MetaData()
meta.bind = bind
meta.reflect()
alert_table = meta.tables['alert']
rp = bind.execute(sa.select([alert_table]))
for alert in rp.fetchall():
deleted_at = alert.deleted_at
query_text = alert.query.get('q', [None])[0]
entity_id = alert.query.get('entity', [None])[0]
if entity_id is not None and len(entity_id) < 30:
entity_id = None
if entity_id is None and query_text is None:
deleted_at = datetime.utcnow()
q = sa.update(alert_table).where(alert_table.c.id == alert.id)
q = q.values(query_text=query_text,
entity_id=entity_id,
deleted_at=deleted_at)
bind.execute(q)
op.drop_column('alert', 'query')
op.drop_column('alert', 'signature')
def downgrade():
op.add_column('alert', sa.Column('signature', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.add_column('alert', sa.Column('query', postgresql.JSONB(),
autoincrement=False, nullable=True))
op.drop_constraint(None, 'alert', type_='foreignkey')
op.drop_column('alert', 'query_text')
op.drop_column('alert', 'entity_id')
| mit | -8,783,233,447,009,741,000 | 32.142857 | 80 | 0.622845 | false |
Tillsten/PyTournament | src/TourneyModes_v2.py | 1 | 3600 | '''
Created on 26.01.2010
@author: Tillsten
'''
from TeamsAndPlayers import *
import random
class Round(object):
def __init__(self, left_teams, freedraw):
self.games_open = []
self.games_finnished = []
self.games_in_progress = []
self.games = []
while left_teams != []:
a = left_teams.pop(0)
b = left_teams.pop()
g = Game(a, b)
self.games.append(g)
if b != freedraw:
self.games_open.append(g)
else:
g.insert_result([(7, 3)])
self.games_finnished.append(g)
#print self.games
class Single_ko(object):
def __init__(self, players_per_team = 1):
self.participants = []
self.players_per_team = players_per_team
self.rounds = []
self.games_open = []
self.games_finnished = []
self.games_in_progress = []
self.finnished = False
def add_team(self, player):
if self.players_per_team == 1:
self.participants.append(Single_team(player))
if self.players_per_team == 2:
self.participants.append(Double_team(player[0], player[1]))
def start(self):
self.freedraw = Single_team(Player("Freilos", "", 0))
n = 1
random.shuffle(self.participants)
while len(self.participants) > 2 ** n:
n += 1
for i in range(len(self.participants), 2 ** n):
self.participants.append(self.freedraw)
self.build_gametree(n)
def build_gametree(self, n):
self.games = []
left_teams = self.participants
        last_round = []
        while left_teams != []:
            a = left_teams.pop(0)
            b = left_teams.pop()
            g = Game(a, b)
            self.games.append(g)
            last_round.append(g)
            if b != self.freedraw:
                self.games_open.append(g)
            else:
                g.insert_result([(7, 3)])
                self.games_finnished.append(g)
        # Build later rounds from the winners of the previous round until
        # only the final (a single game) remains. This assumes Game exposes
        # a `winner` attribute once a result has been inserted.
        while len(last_round) > 1:
            left_teams = [g.winner for g in last_round]
            next_round = []
            while left_teams != []:
                a = left_teams.pop(0)
                b = left_teams.pop()
                g = Game(a, b)
                self.games.append(g)
                next_round.append(g)
            last_round = next_round
        for i in self.games: print i
if __name__ == '__main__':
import psyco
psyco.full()
anton = Single_team(Player("Anton", "A.", 1))
bart = Single_team(Player("Bart", "B.", 2))
caro = Single_team(Player("Caro", "C.", 3))
dieter = Single_team(Player("Dieter", "D.", 4))
edwin = Single_team(Player("Edwin", "E.", 5))
fi = Single_team(Player("Fieter", "F.", 6))
sko = Single_ko()
sko.add_team(anton)
sko.add_team(bart)
sko.add_team(caro)
sko.add_team(dieter)
sko.add_team(edwin)
sko.add_team(fi)
sko.start()
# sko.start_open_game()
# sko.insert_result([(3, 8)], 0)
# sko.start_open_game()
# sko.insert_result([(8, 3)], 0)
# sko.start_open_game()
# sko.start_open_game()
# sko.insert_result([(3, 4), (5, 9)])
# sko.insert_result([(3, 4), (5, 9)])
# sko.start_open_game()
# sko.start_open_game()
# sko.insert_result([(3, 4), (5, 9)])
# sko.insert_result([(3, 4), (5, 9)])
# print "rounds"
# for i in sko.rounds:
# for j in (i.games):
# print j
# sko.rankings()
| gpl-3.0 | 3,809,631,785,339,774,000 | 29 | 71 | 0.486944 | false |
orchidinfosys/odoo | addons/website/tests/test_crawl.py | 54 | 3414 | # -*- coding: utf-8 -*-
import logging
import urlparse
import time
import lxml.html
import openerp
import re
_logger = logging.getLogger(__name__)
class Crawler(openerp.tests.HttpCase):
""" Test suite crawling an openerp CMS instance and checking that all
internal links lead to a 200 response.
If a username and a password are provided, authenticates the user before
starting the crawl
"""
at_install = False
post_install = True
def crawl(self, url, seen=None, msg=''):
        if seen is None:
seen = set()
url_slug = re.sub(r"[/](([^/=?&]+-)?[0-9]+)([/]|$)", '/<slug>/', url)
url_slug = re.sub(r"([^/=?&]+)=[^/=?&]+", '\g<1>=param', url_slug)
if url_slug in seen:
return seen
else:
seen.add(url_slug)
_logger.info("%s %s", msg, url)
r = self.url_open(url)
code = r.getcode()
self.assertIn( code, xrange(200, 300), "%s Fetching %s returned error response (%d)" % (msg, url, code))
if r.info().gettype() == 'text/html':
doc = lxml.html.fromstring(r.read())
for link in doc.xpath('//a[@href]'):
href = link.get('href')
parts = urlparse.urlsplit(href)
# href with any fragment removed
href = urlparse.urlunsplit((
parts.scheme,
parts.netloc,
parts.path,
parts.query,
''
))
# FIXME: handle relative link (not parts.path.startswith /)
if parts.netloc or \
not parts.path.startswith('/') or \
parts.path == '/web' or\
parts.path.startswith('/web/') or \
parts.path.startswith('/en_US/') or \
(parts.scheme and parts.scheme not in ('http', 'https')):
continue
self.crawl(href, seen, msg)
return seen
def test_10_crawl_public(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
seen = self.crawl('/', msg='Anonymous Coward')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "public crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request, ", count, duration, sql, duration/count, float(sql)/count)
def test_20_crawl_demo(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
self.authenticate('demo', 'demo')
seen = self.crawl('/', msg='demo')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "demo crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
def test_30_crawl_admin(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
self.authenticate('admin', 'admin')
seen = self.crawl('/', msg='admin')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "admin crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
| gpl-3.0 | 1,586,682,772,091,523,800 | 34.936842 | 152 | 0.534564 | false |
flyfei/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/keyfactory.py | 361 | 8791 | """Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey,
parseAsPrivateKey
"""
from compat import *
from RSAKey import RSAKey
from Python_RSAKey import Python_RSAKey
import cryptomath
if cryptomath.m2cryptoLoaded:
from OpenSSL_RSAKey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from PyCrypto_RSAKey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
"""Generate an RSA key with the specified bit length.
@type bits: int
@param bits: Desired bit length of the new key's modulus.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: A new RSA private key.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey.generate(bits)
elif implementation == "python":
return Python_RSAKey.generate(bits)
raise ValueError("No acceptable implementations")
def parseXMLKey(s, private=False, public=False, implementations=["python"]):
"""Parse an XML-format key.
The XML format used here is specific to tlslite and cryptoIDlib. The
format can store the public component of a key, or the public and
private components. For example::
<publicKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
</publicKey>
<privateKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
<d>JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy...
<p>5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc...
<q>/E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ...
<dP>mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6...
<dQ>qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB...
<qInv>j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr...
</privateKey>
@type s: str
@param s: A string containing an XML public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the private
key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will be
discarded, so this function will always return a public key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "python":
key = Python_RSAKey.parseXML(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
#Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
implementations=["openssl", "python"]):
"""Parse a PEM-format key.
The PEM format is used by OpenSSL and other tools. The
format is typically used to store both the public and private
components of a key. For example::
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
-----END RSA PRIVATE KEY-----
To generate a key like this with OpenSSL, run::
openssl genrsa 2048 > key.pem
This format also supports password-encrypted private keys. TLS
Lite can only handle password-encrypted private keys when OpenSSL
and M2Crypto are installed. In this case, passwordCallback will be
invoked to query the user for the password.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the
private key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will
be discarded, so this function will always return a public key.
@type passwordCallback: callable
@param passwordCallback: This function will be called, with no
arguments, if the PEM-encoded private key is password-encrypted.
The callback should return the password string. If the password is
incorrect, SyntaxError will be raised. If no callback is passed
and the key is password-encrypted, a prompt will be displayed at
the console.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
key = OpenSSL_RSAKey.parse(s, passwordCallback)
break
elif implementation == "python":
key = Python_RSAKey.parsePEM(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
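# Example use of parsePEMKey above (an editorial sketch; the PEM file path is
# hypothetical):
#
#     s = open("./private_key.pem").read()
#     key = parsePEMKey(s, private=True)
#     assert key.hasPrivateKey()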
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
"""Parse an XML or PEM-formatted public key.
@type s: str
@param s: A string containing an XML or PEM-encoded public or private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA public key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, public=True)
except:
return parseXMLKey(s, public=True)
def parsePrivateKey(s):
"""Parse an XML or PEM-formatted private key.
@type s: str
@param s: A string containing an XML or PEM-encoded private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA private key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, private=True)
except:
return parseXMLKey(s, private=True)
def _createPublicKey(key):
"""
Create a new public key. Discard any private component,
and return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
return _createPublicRSAKey(key.n, key.e)
def _createPrivateKey(key):
"""
Create a new private key. Return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
if not key.hasPrivateKey():
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
"python"]):
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey(n, e)
elif implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e)
elif implementation == "python":
return Python_RSAKey(n, e)
raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
implementations = ["pycrypto", "python"]):
for implementation in implementations:
if implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
elif implementation == "python":
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
raise ValueError("No acceptable implementations")
| apache-2.0 | -8,149,207,193,273,849,000 | 35.176955 | 78 | 0.670458 | false |
jymannob/CouchPotatoServer | libs/requests/packages/urllib3/util/request.py | 304 | 1924 | from base64 import b64encode
from ..packages import six
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(six.b(proxy_basic_auth)).decode('utf-8')
return headers
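
# --- Added illustrative sketch: combining several make_headers() options in one
# call. The user-agent value is made up for the example.
def _example_default_headers():
    # Expected result (dict order unspecified):
    # {'connection': 'keep-alive', 'accept-encoding': 'gzip,deflate',
    #  'user-agent': 'example-client/0.1'}
    return make_headers(keep_alive=True, accept_encoding=True,
                        user_agent='example-client/0.1')
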
| gpl-3.0 | -8,295,405,752,127,636,000 | 27.294118 | 85 | 0.609148 | false |
midonet/kuryr | doc/source/conf.py | 2 | 2475 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx',
'reno.sphinxext'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kuryr'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 | 1,122,890,132,518,161,000 | 31.565789 | 79 | 0.688485 | false |
atty303/pyfilesystem | fs/httpfs.py | 1 | 1659 | """
fs.httpfs
=========
"""
from fs.base import FS
from fs.path import normpath
from fs.errors import ResourceNotFoundError, UnsupportedError
from urlparse import urlparse
from urllib2 import urlopen, URLError
class HTTPFS(FS):
"""Can barely be called a filesystem, but this enables the opener system
to open http files"""
def __init__(self, url):
self.root_url = url
def _make_url(self, path):
path = normpath(path)
url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/'))
return url
def open(self, path, mode="r"):
if '+' in mode or 'w' in mode or 'a' in mode:
raise UnsupportedError('write')
url = self._make_url(path)
try:
f = urlopen(url)
except URLError, e:
raise ResourceNotFoundError(path)
except OSError, e:
raise ResourceNotFoundError(path)
return f
def exists(self, path):
return self.isfile(path)
def isdir(self, path):
return False
def isfile(self, path):
url = self._make_url(path)
f = None
try:
try:
f = urlopen(url)
except (URLError, OSError):
return False
finally:
if f is not None:
f.close()
return True
def listdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
return []
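
# --- Added illustrative sketch (hypothetical helper): reading one remote file
# through HTTPFS. The host and path below are placeholders.
def _example_read(url="http://example.com", path="/index.html"):
    fs = HTTPFS(url)
    if not fs.isfile(path):
        return None
    f = fs.open(path)
    try:
        return f.read()
    finally:
        f.close()
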
| bsd-3-clause | -9,217,931,800,808,576,000 | 23.397059 | 76 | 0.496685 | false |
maleficarium/youtube-dl | youtube_dl/extractor/rtvnh.py | 17 | 2265 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class RTVNHIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rtvnh.nl/video/131946',
'md5': 'cdbec9f44550763c8afc96050fa747dc',
'info_dict': {
'id': '131946',
'ext': 'mp4',
'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
'thumbnail': 're:^https?:.*\.jpg$'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
meta = self._parse_json(self._download_webpage(
'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id)
status = meta.get('status')
if status != 200:
raise ExtractorError(
'%s returned error code %d' % (self.IE_NAME, status), expected=True)
formats = []
rtmp_formats = self._extract_smil_formats(
'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id)
formats.extend(rtmp_formats)
for rtmp_format in rtmp_formats:
rtmp_url = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
rtsp_format = rtmp_format.copy()
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'url': rtmp_url.replace('rtmp://', 'rtsp://'),
'protocol': 'rtsp',
})
formats.append(rtsp_format)
http_base_url = rtmp_url.replace('rtmp://', 'http://')
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': meta['title'].strip(),
'thumbnail': meta.get('image'),
'formats': formats
}
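
# --- Added illustrative sketch (hypothetical helper, not used by the extractor):
# the RTMP -> RTSP URL rewrite performed inside _real_extract above, restated on
# a bare format dict.
def _example_rtmp_to_rtsp(rtmp_format):
    rtmp_url = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
    return rtmp_url.replace('rtmp://', 'rtsp://')
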
| unlicense | 5,644,450,649,177,294,000 | 35.532258 | 84 | 0.52362 | false |
south-coast-science/scs_mfr | src/scs_mfr/cmd/cmd_csv_logger_conf.py | 1 | 3307 | """
Created on 18 Apr 2018
@author: Bruno Beloff ([email protected])
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdCSVLoggerConf(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog { [-r ROOT_PATH] [-o DELETE_OLDEST] [-i WRITE_INTERVAL] | "
"-d } [-v]", version="%prog 1.0")
# optional...
self.__parser.add_option("--root", "-r", type="string", nargs=1, action="store", dest="root_path",
help="set filesystem logging directory")
self.__parser.add_option("--del-oldest", "-o", type="int", nargs=1, action="store", dest="delete_oldest",
help="delete oldest logs to recover space (1) or stop when full (0)")
self.__parser.add_option("--write-int", "-i", type="int", nargs=1, action="store", dest="write_interval",
help="write interval in seconds (0 for immediate writes)")
self.__parser.add_option("--delete", "-d", action="store_true", dest="delete", default=False,
help="delete the logger configuration")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.set() and self.delete:
return False
if self.write_interval is not None and self.write_interval < 0:
return False
return True
def is_complete(self):
if self.root_path is None or self.delete_oldest is None or self.write_interval is None:
return False
return True
def set(self):
if self.root_path is not None or self.delete_oldest is not None or self.write_interval is not None:
return True
return False
# ----------------------------------------------------------------------------------------------------------------
@property
def root_path(self):
return self.__opts.root_path
@property
def delete_oldest(self):
return None if self.__opts.delete_oldest is None else bool(self.__opts.delete_oldest)
@property
def write_interval(self):
return self.__opts.write_interval
@property
def delete(self):
return self.__opts.delete
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdCSVLoggerConf:{root_path:%s, delete_oldest:%s, write_interval:%s, delete:%s, verbose:%s}" % \
(self.root_path, self.delete_oldest, self.write_interval, self.delete, self.verbose)
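
# --- Added illustrative sketch: exercising the validation rules with a fabricated
# argument vector. optparse reads sys.argv, so it is patched for the demo; the
# flag values are made up.
def _example_parse():
    import sys
    sys.argv = ['cmd_csv_logger_conf', '-r', '/srv/logs', '-o', '1', '-i', '10']
    cmd = CmdCSVLoggerConf()
    # With these options both checks pass: set() is true, delete is false,
    # write_interval is non-negative, and all three settings are present.
    return cmd.is_valid(), cmd.is_complete()
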
| mit | -3,350,149,733,203,430,000 | 31.421569 | 118 | 0.487451 | false |
proversity-org/edx-platform | cms/djangoapps/contentstore/views/tests/test_tabs.py | 24 | 8492 | """ Tests for tab functions (just primitive). """
import json
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from contentstore.views import tabs
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
class TabsPageTests(CourseTestCase):
"""Test cases for Tabs (a.k.a Pages) page"""
def setUp(self):
"""Common setup for tests"""
# call super class to setup course, etc.
super(TabsPageTests, self).setUp()
# Set the URL for tests
self.url = reverse_course_url('tabs_handler', self.course.id)
# add a static tab to the course, for code coverage
self.test_tab = ItemFactory.create(
parent_location=self.course.location,
category="static_tab",
display_name="Static_1"
)
self.reload_course()
def check_invalid_tab_id_response(self, resp):
"""Verify response is an error listing the invalid_tab_id"""
self.assertEqual(resp.status_code, 400)
resp_content = json.loads(resp.content)
self.assertIn("error", resp_content)
self.assertIn("invalid_tab_id", resp_content['error'])
def test_not_implemented(self):
"""Verify not implemented errors"""
# JSON GET request not supported
with self.assertRaises(NotImplementedError):
self.client.get(self.url)
# JSON POST request not supported
with self.assertRaises(NotImplementedError):
self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': 'courseware'},
'unsupported_request': None,
}),
)
# invalid JSON POST request
with self.assertRaises(NotImplementedError):
self.client.ajax_post(
self.url,
data={'invalid_request': None},
)
def test_view_index(self):
"""Basic check that the Pages page responds correctly"""
resp = self.client.get_html(self.url)
self.assertEqual(resp.status_code, 200)
self.assertIn('course-nav-list', resp.content)
def test_reorder_tabs(self):
"""Test re-ordering of tabs"""
# get the original tab ids
orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
tab_ids = list(orig_tab_ids)
num_orig_tabs = len(orig_tab_ids)
# make sure we have enough tabs to play around with
self.assertGreaterEqual(num_orig_tabs, 5)
# reorder the last two tabs
tab_ids[num_orig_tabs - 1], tab_ids[num_orig_tabs - 2] = tab_ids[num_orig_tabs - 2], tab_ids[num_orig_tabs - 1]
# remove the middle tab
        # (the code needs to handle the case where the tabs requested for re-ordering are a subset of the course's tabs)
removed_tab = tab_ids.pop(num_orig_tabs / 2)
self.assertEqual(len(tab_ids), num_orig_tabs - 1)
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
)
self.assertEqual(resp.status_code, 204)
# reload the course and verify the new tab order
self.reload_course()
new_tab_ids = [tab.tab_id for tab in self.course.tabs]
self.assertEqual(new_tab_ids, tab_ids + [removed_tab])
self.assertNotEqual(new_tab_ids, orig_tab_ids)
def test_reorder_tabs_invalid_list(self):
"""Test re-ordering of tabs with invalid tab list"""
orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
tab_ids = list(orig_tab_ids)
# reorder the first two tabs
tab_ids[0], tab_ids[1] = tab_ids[1], tab_ids[0]
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
)
self.assertEqual(resp.status_code, 400)
resp_content = json.loads(resp.content)
self.assertIn("error", resp_content)
def test_reorder_tabs_invalid_tab(self):
"""Test re-ordering of tabs with invalid tab"""
invalid_tab_ids = ['courseware', 'info', 'invalid_tab_id']
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in invalid_tab_ids]},
)
self.check_invalid_tab_id_response(resp)
    def check_toggle_tab_visibility(self, tab_type, new_is_hidden_setting):
"""Helper method to check changes in tab visibility"""
# find the tab
old_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
# visibility should be different from new setting
self.assertNotEqual(old_tab.is_hidden, new_is_hidden_setting)
# post the request
resp = self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': old_tab.tab_id},
'is_hidden': new_is_hidden_setting,
}),
)
self.assertEqual(resp.status_code, 204)
# reload the course and verify the new visibility setting
self.reload_course()
new_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
self.assertEqual(new_tab.is_hidden, new_is_hidden_setting)
def test_toggle_tab_visibility(self):
"""Test toggling of tab visibility"""
        self.check_toggle_tab_visibility('wiki', True)
        self.check_toggle_tab_visibility('wiki', False)
def test_toggle_invalid_tab_visibility(self):
"""Test toggling visibility of an invalid tab"""
# post the request
resp = self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': 'invalid_tab_id'}
}),
)
self.check_invalid_tab_id_response(resp)
def test_tab_preview_html(self):
"""
Verify that the static tab renders itself with the correct HTML
"""
preview_url = '/xblock/{}/{}'.format(self.test_tab.location, STUDENT_VIEW)
resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content)
html = resp_content['html']
# Verify that the HTML contains the expected elements
self.assertIn('<span class="action-button-text">Edit</span>', html)
self.assertIn('<span class="sr">Duplicate this component</span>', html)
self.assertIn('<span class="sr">Delete this component</span>', html)
self.assertIn('<span data-tooltip="Drag to reorder" class="drag-handle action"></span>', html)
class PrimitiveTabEdit(ModuleStoreTestCase):
"""Tests for the primitive tab edit data manipulations"""
def test_delete(self):
"""Test primitive tab deletion."""
course = CourseFactory.create()
with self.assertRaises(ValueError):
tabs.primitive_delete(course, 0)
with self.assertRaises(ValueError):
tabs.primitive_delete(course, 1)
with self.assertRaises(IndexError):
tabs.primitive_delete(course, 6)
tabs.primitive_delete(course, 2)
self.assertNotIn({u'type': u'textbooks'}, course.tabs)
# Check that discussion has shifted up
self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})
def test_insert(self):
"""Test primitive tab insertion."""
course = CourseFactory.create()
tabs.primitive_insert(course, 2, 'notes', 'aname')
self.assertEquals(course.tabs[2], {'type': 'notes', 'name': 'aname'})
with self.assertRaises(ValueError):
tabs.primitive_insert(course, 0, 'notes', 'aname')
with self.assertRaises(ValueError):
tabs.primitive_insert(course, 3, 'static_tab', 'aname')
def test_save(self):
"""Test course saving."""
course = CourseFactory.create()
tabs.primitive_insert(course, 3, 'notes', 'aname')
course2 = modulestore().get_course(course.id)
self.assertEquals(course2.tabs[3], {'type': 'notes', 'name': 'aname'})
| agpl-3.0 | -8,597,676,547,037,847,000 | 36.409692 | 120 | 0.612106 | false |
IndonesiaX/edx-platform | lms/djangoapps/lms_xblock/mixin.py | 21 | 7673 | """
Namespace that defines fields common to all blocks used in the LMS
"""
#from django.utils.translation import ugettext_noop as _
from lazy import lazy
from xblock.fields import Boolean, Scope, String, XBlockMixin, Dict
from xblock.validation import ValidationMessage
from xmodule.modulestore.inheritance import UserPartitionList
from xmodule.partitions.partitions import NoSuchUserPartitionError, NoSuchUserPartitionGroupError
# Please do not remove, this is a workaround for Django 1.8.
# more information can be found here: https://openedx.atlassian.net/browse/PLAT-902
_ = lambda text: text
class GroupAccessDict(Dict):
"""Special Dict class for serializing the group_access field"""
def from_json(self, access_dict):
if access_dict is not None:
return {int(k): access_dict[k] for k in access_dict}
def to_json(self, access_dict):
if access_dict is not None:
return {unicode(k): access_dict[k] for k in access_dict}
class LmsBlockMixin(XBlockMixin):
"""
Mixin that defines fields common to all blocks used in the LMS
"""
hide_from_toc = Boolean(
help=_("Whether to display this module in the table of contents"),
default=False,
scope=Scope.settings
)
format = String(
# Translators: "TOC" stands for "Table of Contents"
help=_("What format this module is in (used for deciding which "
"grader to apply, and what to show in the TOC)"),
scope=Scope.settings,
)
chrome = String(
display_name=_("Courseware Chrome"),
# Translators: DO NOT translate the words in quotes here, they are
# specific words for the acceptable values.
help=_("Enter the chrome, or navigation tools, to use for the XBlock in the LMS. Valid values are: \n"
"\"chromeless\" -- to not use tabs or the accordion; \n"
"\"tabs\" -- to use tabs only; \n"
"\"accordion\" -- to use the accordion only; or \n"
"\"tabs,accordion\" -- to use tabs and the accordion."),
scope=Scope.settings,
default=None,
)
default_tab = String(
display_name=_("Default Tab"),
help=_("Enter the tab that is selected in the XBlock. If not set, the Courseware tab is selected."),
scope=Scope.settings,
default=None,
)
source_file = String(
display_name=_("LaTeX Source File Name"),
help=_("Enter the source file name for LaTeX."),
scope=Scope.settings,
deprecated=True
)
visible_to_staff_only = Boolean(
help=_("If true, can be seen only by course staff, regardless of start date."),
default=False,
scope=Scope.settings,
)
group_access = GroupAccessDict(
help=_(
"A dictionary that maps which groups can be shown this block. The keys "
"are group configuration ids and the values are a list of group IDs. "
"If there is no key for a group configuration or if the set of group IDs "
"is empty then the block is considered visible to all. Note that this "
"field is ignored if the block is visible_to_staff_only."
),
default={},
scope=Scope.settings,
)
@lazy
def merged_group_access(self):
"""
This computes access to a block's group_access rules in the context of its position
within the courseware structure, in the form of a lazily-computed attribute.
Each block's group_access rule is merged recursively with its parent's, guaranteeing
that any rule in a parent block will be enforced on descendants, even if a descendant
also defined its own access rules. The return value is always a dict, with the same
structure as that of the group_access field.
When merging access rules results in a case where all groups are denied access in a
user partition (which effectively denies access to that block for all students),
the special value False will be returned for that user partition key.
"""
parent = self.get_parent()
if not parent:
return self.group_access or {}
merged_access = parent.merged_group_access.copy()
if self.group_access is not None:
for partition_id, group_ids in self.group_access.items():
if group_ids: # skip if the "local" group_access for this partition is None or empty.
if partition_id in merged_access:
if merged_access[partition_id] is False:
# special case - means somewhere up the hierarchy, merged access rules have eliminated
# all group_ids from this partition, so there's no possible intersection.
continue
# otherwise, if the parent defines group access rules for this partition,
# intersect with the local ones.
merged_access[partition_id] = list(
set(merged_access[partition_id]).intersection(group_ids)
) or False
else:
# add the group access rules for this partition to the merged set of rules.
merged_access[partition_id] = group_ids
return merged_access
# Specified here so we can see what the value set at the course-level is.
user_partitions = UserPartitionList(
help=_("The list of group configurations for partitioning students in content experiments."),
default=[],
scope=Scope.settings
)
def _get_user_partition(self, user_partition_id):
"""
Returns the user partition with the specified id. Raises
`NoSuchUserPartitionError` if the lookup fails.
"""
for user_partition in self.user_partitions:
if user_partition.id == user_partition_id:
return user_partition
raise NoSuchUserPartitionError("could not find a UserPartition with ID [{}]".format(user_partition_id))
def validate(self):
"""
Validates the state of this xblock instance.
"""
_ = self.runtime.service(self, "i18n").ugettext
validation = super(LmsBlockMixin, self).validate()
has_invalid_user_partitions = False
has_invalid_groups = False
for user_partition_id, group_ids in self.group_access.iteritems():
try:
user_partition = self._get_user_partition(user_partition_id)
except NoSuchUserPartitionError:
has_invalid_user_partitions = True
else:
# Skip the validation check if the partition has been disabled
if user_partition.active:
for group_id in group_ids:
try:
user_partition.get_group(group_id)
except NoSuchUserPartitionGroupError:
has_invalid_groups = True
if has_invalid_user_partitions:
validation.add(
ValidationMessage(
ValidationMessage.ERROR,
_(u"This component refers to deleted or invalid content group configurations.")
)
)
if has_invalid_groups:
validation.add(
ValidationMessage(
ValidationMessage.ERROR,
_(u"This component refers to deleted or invalid content groups.")
)
)
return validation
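
# --- Added illustrative sketch (hypothetical helper, not used by the mixin): the
# merge rule implemented by merged_group_access above, restated as a standalone
# function over plain dicts.
def _example_merge_group_access(parent_access, child_access):
    merged = dict(parent_access)
    for partition_id, group_ids in (child_access or {}).items():
        if not group_ids:
            # A None/empty local rule defers entirely to the parent.
            continue
        if partition_id in merged:
            if merged[partition_id] is False:
                # The ancestors already eliminated every group in this partition.
                continue
            # Intersect with the parent's groups; an empty intersection becomes
            # False, meaning nobody in this partition can see the block.
            merged[partition_id] = list(
                set(merged[partition_id]).intersection(group_ids)) or False
        else:
            merged[partition_id] = group_ids
    return merged
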
| agpl-3.0 | 3,660,196,276,737,328,600 | 42.596591 | 114 | 0.607715 | false |
10clouds/edx-platform | lms/djangoapps/survey/tests/test_views.py | 29 | 5654 | """
Python tests for the Survey views
"""
import json
from collections import OrderedDict
from django.test.client import Client
from django.core.urlresolvers import reverse
from survey.models import SurveyForm, SurveyAnswer
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class SurveyViewsTests(ModuleStoreTestCase):
"""
All tests for the views.py file
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super(SurveyViewsTests, self).setUp()
self.client = Client()
# Create two accounts
self.password = 'abc'
self.student = UserFactory.create(username='student', email='[email protected]', password=self.password)
self.test_survey_name = 'TestSurvey'
self.test_form = '''
<input name="field1" /><input name="field2" /><select name="ddl"><option>1</option></select>
<textarea name="textarea" />
'''
        self.student_answers = OrderedDict([
            (u'field1', u'value1'),
            (u'field2', u'value2'),
            (u'ddl', u'1'),
            (u'textarea', u'textarea'),
        ])
self.course = CourseFactory.create(
display_name='Test Course',
course_survey_required=True,
course_survey_name=self.test_survey_name
)
self.survey = SurveyForm.create(self.test_survey_name, self.test_form)
self.view_url = reverse('view_survey', args=[self.test_survey_name])
self.postback_url = reverse('submit_answers', args=[self.test_survey_name])
self.client.login(username=self.student.username, password=self.password)
def test_unauthenticated_survey_view(self):
"""
Asserts that an unauthenticated user cannot access a survey
"""
anon_user = Client()
resp = anon_user.get(self.view_url)
self.assertEquals(resp.status_code, 302)
def test_survey_not_found(self):
"""
Asserts that if we ask for a Survey that does not exist, then we get a 302 redirect
"""
resp = self.client.get(reverse('view_survey', args=['NonExisting']))
self.assertEquals(resp.status_code, 302)
def test_authenticated_survey_view(self):
"""
Asserts that an authenticated user can see the survey
"""
resp = self.client.get(self.view_url)
self.assertEquals(resp.status_code, 200)
# is the SurveyForm html present in the HTML response?
self.assertIn(self.test_form, resp.content)
    def test_unauthenticated_survey_postback(self):
"""
Asserts that an anonymous user cannot answer a survey
"""
anon_user = Client()
resp = anon_user.post(
self.postback_url,
self.student_answers
)
self.assertEquals(resp.status_code, 302)
def test_survey_postback_to_nonexisting_survey(self):
"""
Asserts that any attempts to post back to a non existing survey returns a 404
"""
resp = self.client.post(
reverse('submit_answers', args=['NonExisting']),
self.student_answers
)
self.assertEquals(resp.status_code, 404)
def test_survey_postback(self):
"""
Asserts that a well formed postback of survey answers is properly stored in the
database
"""
resp = self.client.post(
self.postback_url,
self.student_answers
)
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.content)
self.assertIn('redirect_url', data)
answers = self.survey.get_answers(self.student)
self.assertEquals(answers[self.student.id], self.student_answers)
def test_strip_extra_fields(self):
"""
Verify that any not expected field name in the post-back is not stored
in the database
"""
data = dict.copy(self.student_answers)
data['csrfmiddlewaretoken'] = 'foo'
data['_redirect_url'] = 'bar'
data['course_id'] = unicode(self.course.id)
resp = self.client.post(
self.postback_url,
data
)
self.assertEquals(resp.status_code, 200)
answers = self.survey.get_answers(self.student)
self.assertNotIn('csrfmiddlewaretoken', answers[self.student.id])
self.assertNotIn('_redirect_url', answers[self.student.id])
self.assertNotIn('course_id', answers[self.student.id])
# however we want to make sure we persist the course_id
answer_objs = SurveyAnswer.objects.filter(
user=self.student,
form=self.survey
)
for answer_obj in answer_objs:
self.assertEquals(unicode(answer_obj.course_key), data['course_id'])
def test_encoding_answers(self):
"""
Verify that if some potentially harmful input data is sent, that is is properly HTML encoded
"""
data = dict.copy(self.student_answers)
data['field1'] = '<script type="javascript">alert("Deleting filesystem...")</script>'
resp = self.client.post(
self.postback_url,
data
)
self.assertEquals(resp.status_code, 200)
answers = self.survey.get_answers(self.student)
self.assertEqual(
'<script type="javascript">alert("Deleting filesystem...")</script>',
answers[self.student.id]['field1']
)
| agpl-3.0 | -1,037,389,096,768,592,800 | 32.258824 | 113 | 0.614255 | false |
OpenPymeMx/OCB | addons/account/report/account_invoice_report.py | 14 | 12081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import openerp.addons.decimal_precision as dp
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_name = "account.invoice.report"
_description = "Invoices Statistics"
_auto = False
_rec_name = 'date'
def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):
"""Compute the amounts in the currency of the user
"""
if context is None:
context={}
currency_obj = self.pool.get('res.currency')
currency_rate_obj = self.pool.get('res.currency.rate')
user_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
currency_rate_id = currency_rate_obj.search(cr, uid, [('rate', '=', 1)], limit=1, context=context)[0]
base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id
res = {}
ctx = context.copy()
for item in self.browse(cr, uid, ids, context=context):
ctx['date'] = item.date
price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)
price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)
residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)
res[item.id] = {
'user_currency_price_total': price_total,
'user_currency_price_average': price_average,
'user_currency_residual': residual,
}
return res
_columns = {
'date': fields.date('Date', readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_qty':fields.float('Qty', readonly=True),
'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True),
'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'currency_rate': fields.float('Currency Rate', readonly=True),
'nbr':fields.integer('# of Lines', readonly=True),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
], 'Invoice Status', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True),
'residual': fields.float('Total Residual', readonly=True),
'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
}
_order = 'date desc'
def _select(self):
select_str = """
SELECT sub.id, sub.date, sub.year, sub.month, sub.day, sub.product_id, sub.partner_id,
sub.payment_term, sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average,
cr.rate as currency_rate, sub.residual / cr.rate as residual
"""
return select_str
def _sub_select(self):
select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
to_char(ai.date_invoice::timestamp with time zone, 'YYYY'::text) AS year,
to_char(ai.date_invoice::timestamp with time zone, 'MM'::text) AS month,
to_char(ai.date_invoice::timestamp with time zone, 'YYYY-MM-DD'::text) AS day,
ail.product_id, ai.partner_id, ai.payment_term, ai.period_id,
CASE
WHEN u.uom_type::text <> 'reference'::text
THEN ( SELECT product_uom.name
FROM product_uom
WHERE product_uom.uom_type::text = 'reference'::text
AND product_uom.active
AND product_uom.category_id = u.category_id LIMIT 1)
ELSE u.name
END AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN (- ail.quantity) / u.factor
ELSE ail.quantity / u.factor
END) AS product_qty,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ail.price_subtotal
ELSE ail.price_subtotal
END) AS price_total,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM(- ail.price_subtotal)
ELSE SUM(ail.price_subtotal)
END / CASE
WHEN SUM(ail.quantity / u.factor) <> 0::numeric
THEN CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM((- ail.quantity) / u.factor)
ELSE SUM(ail.quantity / u.factor)
END
ELSE 1::numeric
END AS price_average,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ai.residual
ELSE ai.residual
END / (SELECT count(*) FROM account_invoice_line l where invoice_id = ai.id) *
count(*) AS residual
"""
return select_str
def _from(self):
from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uos_id
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ai.date_invoice, ai.id,
to_char(ai.date_invoice::timestamp with time zone, 'YYYY'::text),
to_char(ai.date_invoice::timestamp with time zone, 'MM'::text),
to_char(ai.date_invoice::timestamp with time zone, 'YYYY-MM-DD'::text),
ai.partner_id, ai.payment_term, ai.period_id, u.name, ai.currency_id, ai.journal_id,
ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual,
ai.amount_total, u.uom_type, u.category_id
"""
return group_by_str
def init(self, cr):
# self._table = account_invoice_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM (
%s %s %s
) AS sub
JOIN res_currency_rate cr ON (cr.currency_id = sub.currency_id)
WHERE
cr.id IN (SELECT id
FROM res_currency_rate cr2
WHERE (cr2.currency_id = sub.currency_id)
AND ((sub.date IS NOT NULL AND cr2.name <= sub.date)
OR (sub.date IS NULL AND cr2.name <= NOW()))
ORDER BY name DESC LIMIT 1)
)""" % (
self._table,
self._select(), self._sub_select(), self._from(), self._group_by()))
account_invoice_report()
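
# --- Added illustrative sketch (abridged; the real init() above also adds the
# res_currency_rate join and the WHERE clause): how the view SQL is composed from
# the helper methods, as plain string formatting with no cursor involved.
def _example_view_sql(report):
    return "%s FROM ( %s %s %s ) AS sub" % (
        report._select(), report._sub_select(), report._from(), report._group_by())
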
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,289,301,241,923,004,000 | 55.71831 | 200 | 0.553431 | false |
apache/airflow | airflow/contrib/sensors/qubole_sensor.py | 2 | 1176 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.qubole.sensors.qubole`."""
import warnings
from airflow.providers.qubole.sensors.qubole import ( # noqa
QuboleFileSensor,
QubolePartitionSensor,
QuboleSensor,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.qubole.sensors.qubole`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | 1,101,788,944,360,246,800 | 35.75 | 91 | 0.761054 | false |
piran9/Project | bindings/python/ns3modulegen_core_customizations.py | 15 | 19476 | import re
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
import sys
class SmartPointerTransformation(typehandlers.TypeTransformation):
"""
This class provides a "type transformation" that tends to support
NS-3 smart pointers. Parameters such as "Ptr<Foo> foo" are
transformed into something like Parameter.new("Foo*", "foo",
transfer_ownership=False). Return values such as Ptr<Foo> are
transformed into ReturnValue.new("Foo*",
caller_owns_return=False). Since the underlying objects have
reference counting, PyBindGen does the right thing.
"""
def __init__(self):
super(SmartPointerTransformation, self).__init__()
self.rx = re.compile(r'(ns3::|::ns3::|)Ptr<([^>]+)>\s*$')
def _get_untransformed_type_traits(self, name):
m = self.rx.match(name)
is_const = False
if m is None:
return None, False
else:
name1 = m.group(2).strip()
if name1.startswith('const '):
name1 = name1[len('const '):]
is_const = True
if name1.endswith(' const'):
name1 = name1[:-len(' const')]
is_const = True
new_name = name1+' *'
if new_name.startswith('::'):
new_name = new_name[2:]
return new_name, is_const
def get_untransformed_name(self, name):
new_name, dummy_is_const = self._get_untransformed_type_traits(name)
return new_name
def create_type_handler(self, type_handler, *args, **kwargs):
if issubclass(type_handler, Parameter):
kwargs['transfer_ownership'] = False
elif issubclass(type_handler, ReturnValue):
kwargs['caller_owns_return'] = False
else:
raise AssertionError
## fix the ctype, add ns3:: namespace
orig_ctype, is_const = self._get_untransformed_type_traits(args[0])
if is_const:
correct_ctype = 'ns3::Ptr< %s const >' % orig_ctype[:-2]
else:
correct_ctype = 'ns3::Ptr< %s >' % orig_ctype[:-2]
args = tuple([correct_ctype] + list(args[1:]))
handler = type_handler(*args, **kwargs)
handler.set_tranformation(self, orig_ctype)
return handler
def untransform(self, type_handler, declarations, code_block, expression):
return 'const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression)
def transform(self, type_handler, declarations, code_block, expression):
assert type_handler.untransformed_ctype[-1] == '*'
return 'ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:-1], expression)
## register the type transformation
transf = SmartPointerTransformation()
typehandlers.return_type_matcher.register_transformation(transf)
typehandlers.param_type_matcher.register_transformation(transf)
del transf
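
# --- Added illustrative sketch (hypothetical helper): what the Ptr<> rewrite
# yields for two sample type strings; the type names are arbitrary.
def _example_untransformed_names():
    t = SmartPointerTransformation()
    # 'ns3::Ptr<ns3::Node>'    -> 'ns3::Node *'
    # 'Ptr<const ns3::Packet>' -> 'ns3::Packet *' (const-ness is tracked separately)
    return [t.get_untransformed_name(s)
            for s in ('ns3::Ptr<ns3::Node>', 'Ptr<const ns3::Packet>')]
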
class ArgvParam(Parameter):
"""
Converts a python list-of-strings argument to a pair of 'int argc,
char *argv[]' arguments to pass into C.
One Python argument becomes two C function arguments -> it's a miracle!
Note: this parameter type handler is not registered by any name;
must be used explicitly.
"""
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = []
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def convert_python_to_c(self, wrapper):
py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
argc_var = wrapper.declarations.declare_variable('int', 'argc')
name = wrapper.declarations.declare_variable('char**', self.name)
idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
#wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
% (name, py_name))
wrapper.before_call.add_cleanup_code('free(%s);' % name)
wrapper.before_call.write_code('''
for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
{
''' % vars())
wrapper.before_call.sink.indent()
wrapper.before_call.write_code('''
PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
''' % vars())
#wrapper.before_call.write_error_check('item == NULL')
wrapper.before_call.write_error_check(
'!PyString_Check(item)',
failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
'"argument %s must be a list of strings");') % self.name)
wrapper.before_call.write_code(
'%s[%s] = PyString_AsString(item);' % (name, idx))
wrapper.before_call.sink.unindent()
wrapper.before_call.write_code('}')
wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
wrapper.call_params.append(argc_var)
wrapper.call_params.append(name)
class CallbackImplProxyMethod(typehandlers.ReverseWrapperBase):
"""
Class that generates a proxy virtual method that calls a similarly named python method.
"""
def __init__(self, return_value, parameters):
super(CallbackImplProxyMethod, self).__init__(return_value, parameters)
def generate_python_call(self):
"""code to call the python method"""
build_params = self.build_params.get_parameters(force_tuple_creation=True)
if build_params[0][0] == '"':
build_params[0] = '(char *) ' + build_params[0]
args = self.before_call.declare_variable('PyObject*', 'args')
self.before_call.write_code('%s = Py_BuildValue(%s);'
% (args, ', '.join(build_params)))
self.before_call.add_cleanup_code('Py_DECREF(%s);' % args)
self.before_call.write_code('py_retval = PyObject_CallObject(m_callback, %s);' % args)
self.before_call.write_error_check('py_retval == NULL')
self.before_call.add_cleanup_code('Py_DECREF(py_retval);')
def generate_callback_classes(out, callbacks):
for callback_impl_num, template_parameters in enumerate(callbacks):
sink = MemoryCodeSink()
cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
#print >> sys.stderr, "***** trying to register callback: %r" % cls_name
class_name = "PythonCallbackImpl%i" % callback_impl_num
sink.writeln('''
class %s : public ns3::CallbackImpl<%s>
{
public:
PyObject *m_callback;
%s(PyObject *callback)
{
Py_INCREF(callback);
m_callback = callback;
}
virtual ~%s()
{
PyGILState_STATE __py_gil_state;
__py_gil_state = (PyEval_ThreadsInitialized() ? PyGILState_Ensure() : (PyGILState_STATE) 0);
Py_DECREF(m_callback);
m_callback = NULL;
PyGILState_Release(__py_gil_state);
}
virtual bool IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other_base) const
{
const %s *other = dynamic_cast<const %s*> (ns3::PeekPointer (other_base));
if (other != NULL)
return (other->m_callback == m_callback);
else
return false;
}
''' % (class_name, ', '.join(template_parameters), class_name, class_name, class_name, class_name))
sink.indent()
callback_return = template_parameters[0]
return_ctype = ctypeparser.parse_type(callback_return)
if ('const' in return_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
return_type = ReturnValue.new(str(return_ctype), **kwargs)
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
warnings.warn("***** Unable to register callback; Return value '%s' error (used in %s): %r"
% (callback_return, cls_name, ex),
Warning)
continue
arguments = []
ok = True
callback_parameters = [arg for arg in template_parameters[1:] if arg != 'ns3::empty']
for arg_num, arg_type in enumerate(callback_parameters):
arg_name = 'arg%i' % (arg_num+1)
param_ctype = ctypeparser.parse_type(arg_type)
if ('const' in param_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
arguments.append(Parameter.new(str(param_ctype), arg_name, **kwargs))
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
warnings.warn("***** Unable to register callback; parameter '%s %s' error (used in %s): %r"
% (arg_type, arg_name, cls_name, ex),
Warning)
ok = False
if not ok:
continue
wrapper = CallbackImplProxyMethod(return_type, arguments)
wrapper.generate(sink, 'operator()', decl_modifiers=[])
sink.unindent()
sink.writeln('};\n')
sink.flush_to(out)
class PythonCallbackParameter(Parameter):
"Class handlers"
CTYPES = [cls_name]
print >> sys.stderr, "***** registering callback handler: %r" % ctypeparser.normalize_type_string(cls_name)
DIRECTIONS = [Parameter.DIRECTION_IN]
PYTHON_CALLBACK_IMPL_NAME = class_name
TEMPLATE_ARGS = template_parameters
def convert_python_to_c(self, wrapper):
"parses python args to get C++ value"
assert isinstance(wrapper, typehandlers.ForwardWrapperBase)
if self.default_value is None:
py_callback = wrapper.declarations.declare_variable('PyObject*', self.name)
wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name)
wrapper.before_call.write_error_check(
'!PyCallable_Check(%s)' % py_callback,
                        'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
callback_impl = wrapper.declarations.declare_variable(
'ns3::Ptr<%s>' % self.PYTHON_CALLBACK_IMPL_NAME,
'%s_cb_impl' % self.name)
wrapper.before_call.write_code("%s = ns3::Create<%s> (%s);"
% (callback_impl, self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
wrapper.call_params.append(
'ns3::Callback<%s> (%s)' % (', '.join(self.TEMPLATE_ARGS), callback_impl))
else:
py_callback = wrapper.declarations.declare_variable('PyObject*', self.name, 'NULL')
wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name, optional=True)
value = wrapper.declarations.declare_variable(
'ns3::Callback<%s>' % ', '.join(self.TEMPLATE_ARGS),
self.name+'_value',
self.default_value)
wrapper.before_call.write_code("if (%s) {" % (py_callback,))
wrapper.before_call.indent()
wrapper.before_call.write_error_check(
'!PyCallable_Check(%s)' % py_callback,
                        'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
wrapper.before_call.write_code("%s = ns3::Callback<%s> (ns3::Create<%s> (%s));"
% (value, ', '.join(self.TEMPLATE_ARGS),
self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
wrapper.before_call.unindent()
wrapper.before_call.write_code("}") # closes: if (py_callback) {
wrapper.call_params.append(value)
def convert_c_to_python(self, wrapper):
raise typehandlers.NotSupportedError("Reverse wrappers for ns3::Callback<...> types "
"(python using callbacks defined in C++) not implemented.")
# def write_preamble(out):
# pybindgen.write_preamble(out)
# out.writeln("#include \"ns3/everything.h\"")
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
def Object_customizations(module):
## ---------------------------------------------------------------------
## Here we generate custom constructor code for all classes that
## derive from ns3::Object. The custom constructors are needed in
## order to support kwargs only and to translate kwargs into ns3
## attributes, etc.
## ---------------------------------------------------------------------
try:
Object = module['ns3::Object']
except KeyError:
return
## add a GetTypeId method to all generatd helper classes
def helper_class_hook(helper_class):
decl = """
static ns3::TypeId GetTypeId (void)
{
static ns3::TypeId tid = ns3::TypeId ("%s")
.SetParent< %s > ()
;
return tid;
}""" % (helper_class.name, helper_class.class_.full_name)
helper_class.add_custom_method(decl)
helper_class.add_post_generation_code(
"NS_OBJECT_ENSURE_REGISTERED (%s);" % helper_class.name)
Object.add_helper_class_hook(helper_class_hook)
def ns3_object_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
assert lvalue
assert not lvalue.startswith('None')
if cpp_class.cannot_be_constructed:
raise CodeGenerationError("%s cannot be constructed (%s)"
% cpp_class.full_name)
if cpp_class.incomplete_type:
raise CodeGenerationError("%s cannot be constructed (incomplete type)"
% cpp_class.full_name)
code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
code_block.write_code("%s->Ref ();" % (lvalue))
def ns3_object_post_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
code_block.write_code("ns3::CompleteConstruct(%s);" % (lvalue, ))
Object.set_instance_creation_function(ns3_object_instance_creation_function)
Object.set_post_instance_creation_function(ns3_object_post_instance_creation_function)
def Attribute_customizations(module):
# Fix up for the "const AttributeValue &v = EmptyAttribute()"
# case, as used extensively by helper classes.
# Here's why we need to do this: pybindgen.gccxmlscanner, when
# scanning parameter default values, is only provided with the
# value as a simple C expression string. (py)gccxml does not
# report the type of the default value.
# As a workaround, here we iterate over all parameters of all
# methods of all classes and tell pybindgen what is the type of
# the default value for attributes.
for cls in module.classes:
for meth in cls.get_all_methods():
for param in meth.parameters:
if isinstance(param, cppclass.CppClassRefParameter):
if param.cpp_class.name == 'AttributeValue' \
and param.default_value is not None \
and param.default_value_type is None:
param.default_value_type = 'ns3::EmptyAttributeValue'
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
add_std_ios_openmode(module)
def add_std_ios_openmode(module):
import pybindgen.typehandlers.base
for alias in "std::_Ios_Openmode", "std::ios::openmode":
pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
| gpl-2.0 | -5,485,775,974,306,110,000 | 42.376392 | 119 | 0.584874 | false |
brunogamacatao/portalsaladeaula | django/contrib/localflavor/pl/forms.py | 46 | 5225 | """
Polish-specific form helpers
"""
import re
from django.forms import ValidationError
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class PLProvinceSelect(Select):
"""
    A select widget with a list of Polish administrative provinces as choices.
"""
def __init__(self, attrs=None):
from pl_voivodeships import VOIVODESHIP_CHOICES
super(PLProvinceSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLCountySelect(Select):
"""
    A select widget with a list of Polish administrative units as choices.
"""
def __init__(self, attrs=None):
from pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
super(PLCountySelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLPESELField(RegexField):
"""
    A form field that validates as a Polish National Identification Number (PESEL).
Checks the following rules:
        * the length consists of 11 digits
* has a valid checksum
The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
"""
default_error_messages = {
'invalid': _(u'National Identification Number consists of 11 digits.'),
'checksum': _(u'Wrong checksum for the National Identification Number.'),
}
def __init__(self, *args, **kwargs):
super(PLPESELField, self).__init__(r'^\d{11}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLPESELField, self).clean(value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
result = 0
for i in range(len(number)):
result += int(number[i]) * multiple_table[i]
return result % 10 == 0
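# Illustrative walk-through, not part of the original module, using the commonly
# cited sample PESEL '44051401359':
#   digits:  4 4 0 5 1 4 0 1 3 5 9
#   weights: 1 3 7 9 1 3 7 9 1 3 1
#   sum = 4 + 12 + 0 + 45 + 1 + 12 + 0 + 9 + 3 + 15 + 9 = 110
# 110 % 10 == 0, so PLPESELField().clean(u'44051401359') accepts the value.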
class PLNIPField(RegexField):
"""
A form field that validates as Polish Tax Number (NIP).
Valid forms are: XXX-XXX-YY-YY or XX-XX-YYY-YYY.
Checksum algorithm based on documentation at
http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
"""
default_error_messages = {
'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.'),
'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
}
def __init__(self, *args, **kwargs):
super(PLNIPField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{2}-\d{2}-\d{3}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLNIPField, self).clean(value)
value = re.sub("[-]", "", value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (6, 5, 7, 2, 3, 4, 5, 6, 7)
result = 0
for i in range(len(number)-1):
result += int(number[i]) * multiple_table[i]
result %= 11
if result == int(number[-1]):
return True
else:
return False
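# Illustrative walk-through, not part of the original module, using the synthetic
# NIP '123-456-32-18' (digits 1234563218 after stripping dashes):
#   first nine digits: 1 2 3 4 5 6 3 2 1
#   weights:           6 5 7 2 3 4 5 6 7
#   sum = 6 + 10 + 21 + 8 + 15 + 24 + 15 + 12 + 7 = 118;  118 % 11 == 8
# The remainder equals the last digit (8), so the checksum is accepted.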
class PLREGONField(RegexField):
"""
    A form field that validates that its input is a REGON number.
    A valid REGON number consists of 9 or 14 digits.
See http://www.stat.gov.pl/bip/regon_ENG_HTML.htm for more information.
"""
default_error_messages = {
'invalid': _(u'National Business Register Number (REGON) consists of 9 or 14 digits.'),
'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
}
def __init__(self, *args, **kwargs):
super(PLREGONField, self).__init__(r'^\d{9,14}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLREGONField, self).clean(value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
weights = (
(8, 9, 2, 3, 4, 5, 6, 7, -1),
(2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8, -1),
(8, 9, 2, 3, 4, 5, 6, 7, -1, 0, 0, 0, 0, 0),
)
weights = [table for table in weights if len(table) == len(number)]
for table in weights:
checksum = sum([int(n) * w for n, w in zip(number, table)])
if checksum % 11 % 10:
return False
return bool(weights)
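# Illustrative walk-through, not part of the original module, using the synthetic
# nine-digit REGON '123456785':
#   sum(digit * weight) with weights (8, 9, 2, 3, 4, 5, 6, 7, -1)
#     = 192 - 5 = 187;  187 % 11 % 10 == 0
# so the checksum passes (the -1 weight subtracts the trailing check digit).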
class PLPostalCodeField(RegexField):
"""
    A form field that validates as a Polish postal code.
    A valid code has the form XX-XXX, where X is a digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
| bsd-3-clause | -4,652,114,944,998,184,000 | 33.150327 | 102 | 0.594833 | false |
cytec/SickRage | lib/feedparser/datetimes/__init__.py | 43 | 1368 | from __future__ import absolute_import
from .asctime import _parse_date_asctime
from .greek import _parse_date_greek
from .hungarian import _parse_date_hungarian
from .iso8601 import _parse_date_iso8601
from .korean import _parse_date_onblog, _parse_date_nate
from .perforce import _parse_date_perforce
from .rfc822 import _parse_date_rfc822
from .w3dtf import _parse_date_w3dtf
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
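# Illustrative sketch, not part of the original module: additional formats can be
# supported by registering a handler just like the built-in ones below, e.g.
#   import time
#   def _parse_date_epoch(date_string):
#       # hypothetical handler for raw Unix timestamps
#       try:
#           return time.gmtime(int(date_string))
#       except ValueError:
#           return None
#   registerDateHandler(_parse_date_epoch)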
registerDateHandler(_parse_date_onblog)
registerDateHandler(_parse_date_nate)
registerDateHandler(_parse_date_greek)
registerDateHandler(_parse_date_hungarian)
registerDateHandler(_parse_date_perforce)
registerDateHandler(_parse_date_asctime)
registerDateHandler(_parse_date_iso8601)
registerDateHandler(_parse_date_rfc822)
registerDateHandler(_parse_date_w3dtf)
| gpl-3.0 | 4,233,569,163,551,560,700 | 32.365854 | 86 | 0.726608 | false |
2014cdbg3/2015cdbg9 | static/Brython3.1.1-20150328-091302/Lib/site-packages/spur.py | 291 | 5461 | #coding: utf-8
import math
# After importing the math module, pi is available as math.pi.
# deg is the conversion factor from degrees to radians.
deg = math.pi/180.
class Spur(object):
def __init__(self, ctx):
self.ctx = ctx
def create_line(self, x1, y1, x2, y2, width=3, fill="red"):
self.ctx.beginPath()
self.ctx.lineWidth = width
self.ctx.moveTo(x1, y1)
self.ctx.lineTo(x2, y2)
self.ctx.strokeStyle = fill
self.ctx.stroke()
    #
    # Below: spur-gear drawing on the main canvas (originally a tkinter canvas).
    #
    # Define a drawing function for a spur gear.
    # midx is the x coordinate of the gear center
    # midy is the y coordinate of the gear center
    # rp is the pitch-circle radius, n is the number of teeth
    # pa is the pressure angle (deg)
    # rot is the rotation angle (deg)
    # Note: drawing fails for n = 52 because the relative sizes of the base
    # circle and the dedendum circle are never compared; this needs a fix.
def Gear(self, midx, midy, rp, n=20, pa=20, color="black"):
        # The involute tooth profile is approximated with 15 line segments.
imax = 15
        # Draw a reference line on the canvas from the gear center up to the top of the pitch circle.
self.create_line(midx, midy, midx, midy-rp)
        # Draw the rp (pitch) circle -- the circle-drawing helper is not defined yet.
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
        # a is the module (the metric measure of tooth size): pitch diameter divided by tooth count.
        # The module also equals the addendum (tooth-crest height).
a=2*rp/n
        # d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here.
d=2.5*rp/n
        # ra is the outside (addendum-circle) radius of the gear.
ra=rp+a
        # Draw the ra circle -- the circle-drawing helper is not defined yet.
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
        # rb is the base-circle radius of the gear.
        # The base circle is the reference circle from which the involute teeth are generated.
rb=rp*math.cos(pa*deg)
        # Draw the rb (base) circle -- the circle-drawing helper is not defined yet.
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
        # rd is the dedendum-circle (root-circle) radius.
rd=rp-d
        # When rd is larger than rb, the involute is drawn down to rd rather than rb.
        # Draw the rd (dedendum) circle -- the circle-drawing helper is not defined yet.
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
        # dr is the radial increment per segment when the span from the base circle
        # to the addendum circle is divided into imax segments.
        # The involute is drawn by splitting that span into imax segments.
        # When rd is larger than rb, the involute starts at rd rather than rb.
if rd>rb:
dr = (ra-rd)/imax
else:
dr=(ra-rb)/imax
        # tan(pa*deg) - pa*deg is the involute function inv(pa).
sigma=math.pi/(2*n)+math.tan(pa*deg)-pa*deg
for j in range(n):
ang=-2.*j*math.pi/n+sigma
ang2=2.*j*math.pi/n+sigma
lxd=midx+rd*math.sin(ang2-2.*math.pi/n)
lyd=midy-rd*math.cos(ang2-2.*math.pi/n)
for i in range(imax+1):
                # When rd is larger than rb, start the involute from rd rather than rb.
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(alpha-ang)
ypt=r*math.cos(alpha-ang)
xd=rd*math.sin(-ang)
yd=rd*math.cos(-ang)
                # For i == 0, the drawing starts from the point on the dedendum circle.
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # Starting from the left dedendum point: except for the first point (xd, yd)
                # on the dedendum circle, each (xpt, ypt) is a segment point on the involute.
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # The last point lies on the addendum circle.
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # Draw the line from the previous dedendum end point to the current one.
            # lxd is the x coordinate of the left point on the dedendum circle, lyd the y coordinate.
            # The straight line below approximates the dedendum-circle arc.
self.create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=color)
for i in range(imax+1):
                # When rd is larger than rb, start the involute from rd rather than rb.
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(ang2-alpha)
ypt=r*math.cos(ang2-alpha)
xd=rd*math.sin(ang2)
yd=rd*math.cos(ang2)
                # For i == 0, the drawing starts from the point on the dedendum circle.
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # Starting from the right dedendum point: except for the first point (xd, yd)
                # on the dedendum circle, each (xpt, ypt) is a segment point on the involute.
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # The last point lies on the addendum circle.
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # lfx is the x coordinate of the left point on the addendum circle, lfy the y coordinate.
            # The straight line below approximates the addendum-circle arc (the tooth tip).
self.create_line(lfx,lfy,rfx,rfy,fill=color)
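# Illustrative usage sketch, not part of the original module (assumes a Brython
# page with a <canvas id="gear"> element; the element id is hypothetical):
#   from browser import document
#   ctx = document["gear"].getContext("2d")
#   spur = Spur(ctx)
#   spur.Gear(200, 200, 100, n=20, pa=20, color="blue")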
| gpl-3.0 | 199,738,541,096,231,840 | 34.680328 | 84 | 0.473696 | false |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/distutils/tests/test_install_scripts.py | 95 | 2652 | """Tests for distutils.command.install_scripts."""
import os
import unittest
from distutils.command.install_scripts import install_scripts
from distutils.core import Distribution
from distutils.tests import support
from test.test_support import run_unittest
class InstallScriptsTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_default_settings(self):
dist = Distribution()
dist.command_obj["build"] = support.DummyCommand(
build_scripts="/foo/bar")
dist.command_obj["install"] = support.DummyCommand(
install_scripts="/splat/funk",
force=1,
skip_build=1,
)
cmd = install_scripts(dist)
self.assertTrue(not cmd.force)
self.assertTrue(not cmd.skip_build)
self.assertTrue(cmd.build_dir is None)
self.assertTrue(cmd.install_dir is None)
cmd.finalize_options()
self.assertTrue(cmd.force)
self.assertTrue(cmd.skip_build)
self.assertEqual(cmd.build_dir, "/foo/bar")
self.assertEqual(cmd.install_dir, "/splat/funk")
def test_installation(self):
source = self.mkdtemp()
expected = []
def write_script(name, text):
expected.append(name)
f = open(os.path.join(source, name), "w")
try:
f.write(text)
finally:
f.close()
write_script("script1.py", ("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
write_script("script2.py", ("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
write_script("shell.sh", ("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
target = self.mkdtemp()
dist = Distribution()
dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
dist.command_obj["install"] = support.DummyCommand(
install_scripts=target,
force=1,
skip_build=1,
)
cmd = install_scripts(dist)
cmd.finalize_options()
cmd.run()
installed = os.listdir(target)
for name in expected:
self.assertTrue(name in installed)
def test_suite():
return unittest.makeSuite(InstallScriptsTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-2.0 | -2,939,922,622,268,922,000 | 31.341463 | 78 | 0.544872 | false |
hanxi/HXGame | libs/external/emscripten/third_party/ply/example/ansic/cparse.py | 164 | 20153 | # -----------------------------------------------------------------------------
# cparse.py
#
# Simple parser for ANSI C. Based on the grammar in K&R, 2nd Ed.
# -----------------------------------------------------------------------------
import sys
import clex
import ply.yacc as yacc
# Get the token map
tokens = clex.tokens
# translation-unit:
def p_translation_unit_1(t):
'translation_unit : external_declaration'
pass
def p_translation_unit_2(t):
'translation_unit : translation_unit external_declaration'
pass
# external-declaration:
def p_external_declaration_1(t):
'external_declaration : function_definition'
pass
def p_external_declaration_2(t):
'external_declaration : declaration'
pass
# function-definition:
def p_function_definition_1(t):
'function_definition : declaration_specifiers declarator declaration_list compound_statement'
pass
def p_function_definition_2(t):
'function_definition : declarator declaration_list compound_statement'
pass
def p_function_definition_3(t):
'function_definition : declarator compound_statement'
pass
def p_function_definition_4(t):
'function_definition : declaration_specifiers declarator compound_statement'
pass
# declaration:
def p_declaration_1(t):
'declaration : declaration_specifiers init_declarator_list SEMI'
pass
def p_declaration_2(t):
'declaration : declaration_specifiers SEMI'
pass
# declaration-list:
def p_declaration_list_1(t):
'declaration_list : declaration'
pass
def p_declaration_list_2(t):
'declaration_list : declaration_list declaration '
pass
# declaration-specifiers
def p_declaration_specifiers_1(t):
'declaration_specifiers : storage_class_specifier declaration_specifiers'
pass
def p_declaration_specifiers_2(t):
'declaration_specifiers : type_specifier declaration_specifiers'
pass
def p_declaration_specifiers_3(t):
'declaration_specifiers : type_qualifier declaration_specifiers'
pass
def p_declaration_specifiers_4(t):
'declaration_specifiers : storage_class_specifier'
pass
def p_declaration_specifiers_5(t):
'declaration_specifiers : type_specifier'
pass
def p_declaration_specifiers_6(t):
'declaration_specifiers : type_qualifier'
pass
# storage-class-specifier
def p_storage_class_specifier(t):
'''storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
'''
pass
# type-specifier:
def p_type_specifier(t):
'''type_specifier : VOID
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| SIGNED
| UNSIGNED
| struct_or_union_specifier
| enum_specifier
| TYPEID
'''
pass
# type-qualifier:
def p_type_qualifier(t):
'''type_qualifier : CONST
| VOLATILE'''
pass
# struct-or-union-specifier
def p_struct_or_union_specifier_1(t):
'struct_or_union_specifier : struct_or_union ID LBRACE struct_declaration_list RBRACE'
pass
def p_struct_or_union_specifier_2(t):
'struct_or_union_specifier : struct_or_union LBRACE struct_declaration_list RBRACE'
pass
def p_struct_or_union_specifier_3(t):
'struct_or_union_specifier : struct_or_union ID'
pass
# struct-or-union:
def p_struct_or_union(t):
'''struct_or_union : STRUCT
| UNION
'''
pass
# struct-declaration-list:
def p_struct_declaration_list_1(t):
'struct_declaration_list : struct_declaration'
pass
def p_struct_declaration_list_2(t):
'struct_declaration_list : struct_declaration_list struct_declaration'
pass
# init-declarator-list:
def p_init_declarator_list_1(t):
'init_declarator_list : init_declarator'
pass
def p_init_declarator_list_2(t):
'init_declarator_list : init_declarator_list COMMA init_declarator'
pass
# init-declarator
def p_init_declarator_1(t):
'init_declarator : declarator'
pass
def p_init_declarator_2(t):
'init_declarator : declarator EQUALS initializer'
pass
# struct-declaration:
def p_struct_declaration(t):
'struct_declaration : specifier_qualifier_list struct_declarator_list SEMI'
pass
# specifier-qualifier-list:
def p_specifier_qualifier_list_1(t):
'specifier_qualifier_list : type_specifier specifier_qualifier_list'
pass
def p_specifier_qualifier_list_2(t):
'specifier_qualifier_list : type_specifier'
pass
def p_specifier_qualifier_list_3(t):
'specifier_qualifier_list : type_qualifier specifier_qualifier_list'
pass
def p_specifier_qualifier_list_4(t):
'specifier_qualifier_list : type_qualifier'
pass
# struct-declarator-list:
def p_struct_declarator_list_1(t):
'struct_declarator_list : struct_declarator'
pass
def p_struct_declarator_list_2(t):
'struct_declarator_list : struct_declarator_list COMMA struct_declarator'
pass
# struct-declarator:
def p_struct_declarator_1(t):
'struct_declarator : declarator'
pass
def p_struct_declarator_2(t):
'struct_declarator : declarator COLON constant_expression'
pass
def p_struct_declarator_3(t):
'struct_declarator : COLON constant_expression'
pass
# enum-specifier:
def p_enum_specifier_1(t):
'enum_specifier : ENUM ID LBRACE enumerator_list RBRACE'
pass
def p_enum_specifier_2(t):
'enum_specifier : ENUM LBRACE enumerator_list RBRACE'
pass
def p_enum_specifier_3(t):
'enum_specifier : ENUM ID'
pass
# enumerator_list:
def p_enumerator_list_1(t):
'enumerator_list : enumerator'
pass
def p_enumerator_list_2(t):
'enumerator_list : enumerator_list COMMA enumerator'
pass
# enumerator:
def p_enumerator_1(t):
'enumerator : ID'
pass
def p_enumerator_2(t):
'enumerator : ID EQUALS constant_expression'
pass
# declarator:
def p_declarator_1(t):
'declarator : pointer direct_declarator'
pass
def p_declarator_2(t):
'declarator : direct_declarator'
pass
# direct-declarator:
def p_direct_declarator_1(t):
'direct_declarator : ID'
pass
def p_direct_declarator_2(t):
'direct_declarator : LPAREN declarator RPAREN'
pass
def p_direct_declarator_3(t):
'direct_declarator : direct_declarator LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_declarator_4(t):
'direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN '
pass
def p_direct_declarator_5(t):
'direct_declarator : direct_declarator LPAREN identifier_list RPAREN '
pass
def p_direct_declarator_6(t):
'direct_declarator : direct_declarator LPAREN RPAREN '
pass
# pointer:
def p_pointer_1(t):
'pointer : TIMES type_qualifier_list'
pass
def p_pointer_2(t):
'pointer : TIMES'
pass
def p_pointer_3(t):
'pointer : TIMES type_qualifier_list pointer'
pass
def p_pointer_4(t):
'pointer : TIMES pointer'
pass
# type-qualifier-list:
def p_type_qualifier_list_1(t):
'type_qualifier_list : type_qualifier'
pass
def p_type_qualifier_list_2(t):
'type_qualifier_list : type_qualifier_list type_qualifier'
pass
# parameter-type-list:
def p_parameter_type_list_1(t):
'parameter_type_list : parameter_list'
pass
def p_parameter_type_list_2(t):
'parameter_type_list : parameter_list COMMA ELLIPSIS'
pass
# parameter-list:
def p_parameter_list_1(t):
'parameter_list : parameter_declaration'
pass
def p_parameter_list_2(t):
'parameter_list : parameter_list COMMA parameter_declaration'
pass
# parameter-declaration:
def p_parameter_declaration_1(t):
'parameter_declaration : declaration_specifiers declarator'
pass
def p_parameter_declaration_2(t):
'parameter_declaration : declaration_specifiers abstract_declarator_opt'
pass
# identifier-list:
def p_identifier_list_1(t):
'identifier_list : ID'
pass
def p_identifier_list_2(t):
'identifier_list : identifier_list COMMA ID'
pass
# initializer:
def p_initializer_1(t):
'initializer : assignment_expression'
pass
def p_initializer_2(t):
'''initializer : LBRACE initializer_list RBRACE
| LBRACE initializer_list COMMA RBRACE'''
pass
# initializer-list:
def p_initializer_list_1(t):
'initializer_list : initializer'
pass
def p_initializer_list_2(t):
'initializer_list : initializer_list COMMA initializer'
pass
# type-name:
def p_type_name(t):
'type_name : specifier_qualifier_list abstract_declarator_opt'
pass
def p_abstract_declarator_opt_1(t):
'abstract_declarator_opt : empty'
pass
def p_abstract_declarator_opt_2(t):
'abstract_declarator_opt : abstract_declarator'
pass
# abstract-declarator:
def p_abstract_declarator_1(t):
'abstract_declarator : pointer '
pass
def p_abstract_declarator_2(t):
'abstract_declarator : pointer direct_abstract_declarator'
pass
def p_abstract_declarator_3(t):
'abstract_declarator : direct_abstract_declarator'
pass
# direct-abstract-declarator:
def p_direct_abstract_declarator_1(t):
'direct_abstract_declarator : LPAREN abstract_declarator RPAREN'
pass
def p_direct_abstract_declarator_2(t):
'direct_abstract_declarator : direct_abstract_declarator LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_abstract_declarator_3(t):
'direct_abstract_declarator : LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_abstract_declarator_4(t):
'direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN'
pass
def p_direct_abstract_declarator_5(t):
'direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN'
pass
# Optional fields in abstract declarators
def p_constant_expression_opt_1(t):
'constant_expression_opt : empty'
pass
def p_constant_expression_opt_2(t):
'constant_expression_opt : constant_expression'
pass
def p_parameter_type_list_opt_1(t):
'parameter_type_list_opt : empty'
pass
def p_parameter_type_list_opt_2(t):
'parameter_type_list_opt : parameter_type_list'
pass
# statement:
def p_statement(t):
'''
statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
'''
pass
# labeled-statement:
def p_labeled_statement_1(t):
'labeled_statement : ID COLON statement'
pass
def p_labeled_statement_2(t):
'labeled_statement : CASE constant_expression COLON statement'
pass
def p_labeled_statement_3(t):
'labeled_statement : DEFAULT COLON statement'
pass
# expression-statement:
def p_expression_statement(t):
'expression_statement : expression_opt SEMI'
pass
# compound-statement:
def p_compound_statement_1(t):
'compound_statement : LBRACE declaration_list statement_list RBRACE'
pass
def p_compound_statement_2(t):
'compound_statement : LBRACE statement_list RBRACE'
pass
def p_compound_statement_3(t):
'compound_statement : LBRACE declaration_list RBRACE'
pass
def p_compound_statement_4(t):
'compound_statement : LBRACE RBRACE'
pass
# statement-list:
def p_statement_list_1(t):
'statement_list : statement'
pass
def p_statement_list_2(t):
'statement_list : statement_list statement'
pass
# selection-statement
def p_selection_statement_1(t):
'selection_statement : IF LPAREN expression RPAREN statement'
pass
def p_selection_statement_2(t):
'selection_statement : IF LPAREN expression RPAREN statement ELSE statement '
pass
def p_selection_statement_3(t):
'selection_statement : SWITCH LPAREN expression RPAREN statement '
pass
# iteration_statement:
def p_iteration_statement_1(t):
'iteration_statement : WHILE LPAREN expression RPAREN statement'
pass
def p_iteration_statement_2(t):
'iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement '
pass
def p_iteration_statement_3(t):
'iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI'
pass
# jump_statement:
def p_jump_statement_1(t):
'jump_statement : GOTO ID SEMI'
pass
def p_jump_statement_2(t):
'jump_statement : CONTINUE SEMI'
pass
def p_jump_statement_3(t):
'jump_statement : BREAK SEMI'
pass
def p_jump_statement_4(t):
'jump_statement : RETURN expression_opt SEMI'
pass
def p_expression_opt_1(t):
'expression_opt : empty'
pass
def p_expression_opt_2(t):
'expression_opt : expression'
pass
# expression:
def p_expression_1(t):
'expression : assignment_expression'
pass
def p_expression_2(t):
'expression : expression COMMA assignment_expression'
pass
# assignment_expression:
def p_assignment_expression_1(t):
'assignment_expression : conditional_expression'
pass
def p_assignment_expression_2(t):
'assignment_expression : unary_expression assignment_operator assignment_expression'
pass
# assignment_operator:
def p_assignment_operator(t):
'''
assignment_operator : EQUALS
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
| XOREQUAL
'''
pass
# conditional-expression
def p_conditional_expression_1(t):
'conditional_expression : logical_or_expression'
pass
def p_conditional_expression_2(t):
'conditional_expression : logical_or_expression CONDOP expression COLON conditional_expression '
pass
# constant-expression
def p_constant_expression(t):
'constant_expression : conditional_expression'
pass
# logical-or-expression
def p_logical_or_expression_1(t):
'logical_or_expression : logical_and_expression'
pass
def p_logical_or_expression_2(t):
'logical_or_expression : logical_or_expression LOR logical_and_expression'
pass
# logical-and-expression
def p_logical_and_expression_1(t):
'logical_and_expression : inclusive_or_expression'
pass
def p_logical_and_expression_2(t):
'logical_and_expression : logical_and_expression LAND inclusive_or_expression'
pass
# inclusive-or-expression:
def p_inclusive_or_expression_1(t):
'inclusive_or_expression : exclusive_or_expression'
pass
def p_inclusive_or_expression_2(t):
'inclusive_or_expression : inclusive_or_expression OR exclusive_or_expression'
pass
# exclusive-or-expression:
def p_exclusive_or_expression_1(t):
'exclusive_or_expression : and_expression'
pass
def p_exclusive_or_expression_2(t):
'exclusive_or_expression : exclusive_or_expression XOR and_expression'
pass
# AND-expression
def p_and_expression_1(t):
'and_expression : equality_expression'
pass
def p_and_expression_2(t):
'and_expression : and_expression AND equality_expression'
pass
# equality-expression:
def p_equality_expression_1(t):
'equality_expression : relational_expression'
pass
def p_equality_expression_2(t):
'equality_expression : equality_expression EQ relational_expression'
pass
def p_equality_expression_3(t):
'equality_expression : equality_expression NE relational_expression'
pass
# relational-expression:
def p_relational_expression_1(t):
'relational_expression : shift_expression'
pass
def p_relational_expression_2(t):
'relational_expression : relational_expression LT shift_expression'
pass
def p_relational_expression_3(t):
'relational_expression : relational_expression GT shift_expression'
pass
def p_relational_expression_4(t):
'relational_expression : relational_expression LE shift_expression'
pass
def p_relational_expression_5(t):
'relational_expression : relational_expression GE shift_expression'
pass
# shift-expression
def p_shift_expression_1(t):
'shift_expression : additive_expression'
pass
def p_shift_expression_2(t):
'shift_expression : shift_expression LSHIFT additive_expression'
pass
def p_shift_expression_3(t):
'shift_expression : shift_expression RSHIFT additive_expression'
pass
# additive-expression
def p_additive_expression_1(t):
'additive_expression : multiplicative_expression'
pass
def p_additive_expression_2(t):
'additive_expression : additive_expression PLUS multiplicative_expression'
pass
def p_additive_expression_3(t):
'additive_expression : additive_expression MINUS multiplicative_expression'
pass
# multiplicative-expression
def p_multiplicative_expression_1(t):
'multiplicative_expression : cast_expression'
pass
def p_multiplicative_expression_2(t):
'multiplicative_expression : multiplicative_expression TIMES cast_expression'
pass
def p_multiplicative_expression_3(t):
'multiplicative_expression : multiplicative_expression DIVIDE cast_expression'
pass
def p_multiplicative_expression_4(t):
'multiplicative_expression : multiplicative_expression MOD cast_expression'
pass
# cast-expression:
def p_cast_expression_1(t):
'cast_expression : unary_expression'
pass
def p_cast_expression_2(t):
'cast_expression : LPAREN type_name RPAREN cast_expression'
pass
# unary-expression:
def p_unary_expression_1(t):
'unary_expression : postfix_expression'
pass
def p_unary_expression_2(t):
'unary_expression : PLUSPLUS unary_expression'
pass
def p_unary_expression_3(t):
'unary_expression : MINUSMINUS unary_expression'
pass
def p_unary_expression_4(t):
'unary_expression : unary_operator cast_expression'
pass
def p_unary_expression_5(t):
'unary_expression : SIZEOF unary_expression'
pass
def p_unary_expression_6(t):
'unary_expression : SIZEOF LPAREN type_name RPAREN'
pass
#unary-operator
def p_unary_operator(t):
'''unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT '''
pass
# postfix-expression:
def p_postfix_expression_1(t):
'postfix_expression : primary_expression'
pass
def p_postfix_expression_2(t):
'postfix_expression : postfix_expression LBRACKET expression RBRACKET'
pass
def p_postfix_expression_3(t):
'postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN'
pass
def p_postfix_expression_4(t):
'postfix_expression : postfix_expression LPAREN RPAREN'
pass
def p_postfix_expression_5(t):
'postfix_expression : postfix_expression PERIOD ID'
pass
def p_postfix_expression_6(t):
'postfix_expression : postfix_expression ARROW ID'
pass
def p_postfix_expression_7(t):
'postfix_expression : postfix_expression PLUSPLUS'
pass
def p_postfix_expression_8(t):
'postfix_expression : postfix_expression MINUSMINUS'
pass
# primary-expression:
def p_primary_expression(t):
'''primary_expression : ID
| constant
| SCONST
| LPAREN expression RPAREN'''
pass
# argument-expression-list:
def p_argument_expression_list(t):
'''argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression'''
pass
# constant:
def p_constant(t):
'''constant : ICONST
| FCONST
| CCONST'''
pass
def p_empty(t):
'empty : '
pass
def p_error(t):
print("Whoa. We're hosed")
import profile
# Build the grammar
yacc.yacc(method='LALR')
#profile.run("yacc.yacc(method='LALR')")
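# Illustrative usage sketch, not part of the original example: since importing
# clex builds the lexer as a side effect, parsing a file can be as simple as
#   if __name__ == '__main__':
#       yacc.parse(open(sys.argv[1]).read())
# (yacc.parse falls back to the most recently built lexer by default).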
| mit | 2,550,662,019,828,215,000 | 22.35226 | 111 | 0.669826 | false |
rosmo/aurora | src/main/python/apache/aurora/client/api/sla.py | 5 | 12572 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import time
from collections import defaultdict, namedtuple
from twitter.common import log
from apache.aurora.client.base import DEFAULT_GROUPING, format_response, group_hosts
from apache.aurora.common.aurora_job_key import AuroraJobKey
from gen.apache.aurora.api.constants import LIVE_STATES
from gen.apache.aurora.api.ttypes import ResponseCode, ScheduleStatus, TaskQuery
def job_key_from_scheduled(task, cluster):
"""Creates AuroraJobKey from the ScheduledTask.
Arguments:
task -- ScheduledTask to get job key from.
cluster -- Cluster the task belongs to.
"""
config = task.assignedTask.task
return AuroraJobKey(
cluster=cluster.name,
role=config.job.role if config.job else config.owner.role,
env=config.job.environment if config.job else config.environment,
name=config.job.name if config.job else config.jobName
)
def task_query(hosts=None, job_keys=None):
"""Creates TaskQuery optionally scoped by a job(s) or hosts.
Arguments:
hosts -- list of hostnames to scope the query by.
job_keys -- list of AuroraJobKeys to scope the query by.
"""
return TaskQuery(
slaveHosts=set(hosts) if hosts else None,
jobKeys=[k.to_thrift() for k in job_keys] if job_keys else None,
statuses=LIVE_STATES)
class JobUpTimeSlaVector(object):
"""A grouping of job active tasks by:
- instance: Map of instance ID -> instance uptime in seconds.
Exposes an API for converting raw instance uptime data into job SLA metrics.
"""
def __init__(self, tasks, now=None):
self._tasks = tasks
self._now = now or time.time()
self._uptime_map = self._instance_uptime()
def total_tasks(self):
"""Returns the total count of active tasks."""
return len(self._uptime_map)
def get_wait_time_to_sla(self, percentile, duration, total_tasks=None):
"""Returns an approximate wait time until the job reaches the specified SLA
defined by percentile and duration.
Arguments:
percentile -- up count percentile to calculate wait time against.
duration -- uptime duration to calculate wait time against.
total_tasks -- optional total task count to calculate against.
"""
upcount = self.get_task_up_count(duration, total_tasks)
if upcount >= percentile:
return 0
# To get wait time to SLA:
# - Calculate the desired number of up instances in order to satisfy the percentile.
# - Find the desired index (x) in the instance list sorted in non-decreasing order of uptimes.
# If desired index outside of current element count -> return None for "infeasible".
# - Calculate wait time as: duration - duration(x)
elements = len(self._uptime_map)
total = total_tasks or elements
target_count = math.ceil(total * percentile / 100.0)
index = elements - int(target_count)
if index < 0 or index >= elements:
return None
else:
return duration - sorted(self._uptime_map.values())[index]
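  # Illustrative walk-through, not part of the original module: with instance
  # uptimes of [10, 50, 300] seconds, percentile=50 and duration=200, only one of
  # three tasks meets the duration (33.3% < 50), so target_count = ceil(3 * 0.5)
  # = 2, index = 3 - 2 = 1, and the wait time is 200 - 50 = 150 seconds.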
def get_task_up_count(self, duration, total_tasks=None):
"""Returns the percentage of job tasks that stayed up longer than duration.
Arguments:
duration -- uptime duration in seconds.
total_tasks -- optional total task count to calculate against.
"""
total = total_tasks or len(self._uptime_map)
above = len([uptime for uptime in self._uptime_map.values() if uptime >= duration])
return 100.0 * above / total if total else 0
def get_job_uptime(self, percentile):
"""Returns the uptime (in seconds) of the job at the specified percentile.
Arguments:
percentile -- percentile to report uptime for.
"""
if percentile <= 0 or percentile >= 100:
raise ValueError('Percentile must be within (0, 100), got %r instead.' % percentile)
total = len(self._uptime_map)
value = math.floor(percentile / 100.0 * total)
index = total - int(value) - 1
return sorted(self._uptime_map.values())[index] if 0 <= index < total else 0
def _instance_uptime(self):
instance_map = {}
for task in self._tasks:
for event in task.taskEvents:
if event.status == ScheduleStatus.RUNNING:
instance_map[task.assignedTask.instanceId] = math.floor(
self._now - event.timestamp / 1000)
break
return instance_map
JobUpTimeLimit = namedtuple('JobUpTimeLimit', ['job', 'percentage', 'duration_secs'])
JobUpTimeDetails = namedtuple('JobUpTimeDetails',
['job', 'predicted_percentage', 'safe', 'safe_in_secs'])
class DomainUpTimeSlaVector(object):
"""A grouping of all active tasks in the cluster by:
- job: Map of job_key -> task. Provides logical mapping between jobs and their active tasks.
- host: Map of hostname -> job_key. Provides logical mapping between hosts and their jobs.
Exposes an API for querying safe domain details.
"""
DEFAULT_MIN_INSTANCE_COUNT = 2
def __init__(self, cluster, tasks, min_instance_count=DEFAULT_MIN_INSTANCE_COUNT, hosts=None):
self._cluster = cluster
self._tasks = tasks
self._now = time.time()
self._tasks_by_job, self._jobs_by_host, self._hosts_by_job = self._init_mappings(
min_instance_count)
self._host_filter = hosts
def get_safe_hosts(self,
percentage,
duration,
job_limits=None,
grouping_function=DEFAULT_GROUPING):
"""Returns hosts safe to restart with respect to their job SLA.
Every host is analyzed separately without considering other job hosts.
Arguments:
percentage -- default task up count percentage. Used if job_limits mapping is not found.
duration -- default task uptime duration in seconds. Used if job_limits mapping is not found.
job_limits -- optional SLA override map. Key: job key. Value JobUpTimeLimit. If specified,
replaces default percentage/duration within the job context.
grouping_function -- grouping function to use to group hosts.
"""
safe_groups = []
for hosts, job_keys in self._iter_groups(
self._jobs_by_host.keys(), grouping_function, self._host_filter):
safe_hosts = defaultdict(list)
for job_key in job_keys:
job_hosts = hosts.intersection(self._hosts_by_job[job_key])
job_duration = duration
job_percentage = percentage
if job_limits and job_key in job_limits:
job_duration = job_limits[job_key].duration_secs
job_percentage = job_limits[job_key].percentage
filtered_percentage, _, _ = self._simulate_hosts_down(job_key, job_hosts, job_duration)
if filtered_percentage < job_percentage:
break
for host in job_hosts:
safe_hosts[host].append(JobUpTimeLimit(job_key, filtered_percentage, job_duration))
else:
safe_groups.append(safe_hosts)
return safe_groups
def probe_hosts(self, percentage, duration, grouping_function=DEFAULT_GROUPING):
"""Returns predicted job SLAs following the removal of provided hosts.
For every given host creates a list of JobUpTimeDetails with predicted job SLA details
in case the host is restarted, including: host, job_key, predicted up count, whether
the predicted job SLA >= percentage and the expected wait time in seconds for the job
to reach its SLA.
Arguments:
percentage -- task up count percentage.
duration -- task uptime duration in seconds.
grouping_function -- grouping function to use to group hosts.
"""
probed_groups = []
for hosts, job_keys in self._iter_groups(self._host_filter or [], grouping_function):
probed_hosts = defaultdict(list)
for job_key in job_keys:
job_hosts = hosts.intersection(self._hosts_by_job[job_key])
filtered_percentage, total_count, filtered_vector = self._simulate_hosts_down(
job_key, job_hosts, duration)
# Calculate wait time to SLA in case down host violates job's SLA.
if filtered_percentage < percentage:
safe = False
wait_to_sla = filtered_vector.get_wait_time_to_sla(percentage, duration, total_count)
else:
safe = True
wait_to_sla = 0
for host in job_hosts:
probed_hosts[host].append(
JobUpTimeDetails(job_key, filtered_percentage, safe, wait_to_sla))
if probed_hosts:
probed_groups.append(probed_hosts)
return probed_groups
def _iter_groups(self, hosts_to_group, grouping_function, host_filter=None):
groups = group_hosts(hosts_to_group, grouping_function)
for _, hosts in sorted(groups.items(), key=lambda v: v[0]):
job_keys = set()
for host in hosts:
if host_filter and host not in self._host_filter:
continue
job_keys = job_keys.union(self._jobs_by_host.get(host, set()))
yield hosts, job_keys
def _create_group_results(self, group, uptime_details):
result = defaultdict(list)
for host in group.keys():
      result[host].append(uptime_details)
    return result
def _simulate_hosts_down(self, job_key, hosts, duration):
unfiltered_tasks = self._tasks_by_job[job_key]
# Get total job task count to use in SLA calculation.
total_count = len(unfiltered_tasks)
# Get a list of job tasks that would remain after the affected hosts go down
# and create an SLA vector with these tasks.
filtered_tasks = [task for task in unfiltered_tasks
if task.assignedTask.slaveHost not in hosts]
filtered_vector = JobUpTimeSlaVector(filtered_tasks, self._now)
# Calculate the SLA that would be in effect should the host go down.
filtered_percentage = filtered_vector.get_task_up_count(duration, total_count)
return filtered_percentage, total_count, filtered_vector
def _init_mappings(self, count):
tasks_by_job = defaultdict(list)
for task in self._tasks:
if task.assignedTask.task.production:
tasks_by_job[job_key_from_scheduled(task, self._cluster)].append(task)
# Filter jobs by the min instance count.
tasks_by_job = defaultdict(list, ((job, tasks) for job, tasks
in tasks_by_job.items() if len(tasks) >= count))
jobs_by_host = defaultdict(set)
hosts_by_job = defaultdict(set)
for job_key, tasks in tasks_by_job.items():
for task in tasks:
host = task.assignedTask.slaveHost
jobs_by_host[host].add(job_key)
hosts_by_job[job_key].add(host)
return tasks_by_job, jobs_by_host, hosts_by_job
class Sla(object):
"""Defines methods for generating job uptime metrics required for monitoring job SLA."""
def __init__(self, scheduler):
self._scheduler = scheduler
def get_job_uptime_vector(self, job_key):
"""Returns a JobUpTimeSlaVector object for the given job key.
Arguments:
job_key -- job to create a task uptime vector for.
"""
return JobUpTimeSlaVector(self._get_tasks(task_query(job_keys=[job_key])))
def get_domain_uptime_vector(self, cluster, min_instance_count, hosts=None):
"""Returns a DomainUpTimeSlaVector object with all available job uptimes.
Arguments:
cluster -- Cluster to get vector for.
min_instance_count -- Minimum job instance count to consider for domain uptime calculations.
hosts -- optional list of hostnames to query by.
"""
tasks = self._get_tasks(task_query(hosts=hosts)) if hosts else None
job_keys = set(job_key_from_scheduled(t, cluster) for t in tasks) if tasks else None
# Avoid full cluster pull if job_keys are missing for any reason but the hosts are specified.
job_tasks = [] if hosts and not job_keys else self._get_tasks(task_query(job_keys=job_keys))
return DomainUpTimeSlaVector(
cluster,
job_tasks,
min_instance_count=min_instance_count,
hosts=hosts)
def _get_tasks(self, task_query):
resp = self._scheduler.getTasksWithoutConfigs(task_query)
log.info(format_response(resp))
if resp.responseCode != ResponseCode.OK:
return []
return resp.result.scheduleStatusResult.tasks
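# Illustrative usage sketch, not part of the original module (the scheduler
# handle is hypothetical):
#   sla = Sla(scheduler_proxy)
#   vector = sla.get_job_uptime_vector(AuroraJobKey('cluster', 'role', 'env', 'job'))
#   vector.get_task_up_count(duration=1800)  # % of instances up for >= 30 minutes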
| apache-2.0 | -4,513,388,311,178,006,000 | 37.329268 | 100 | 0.685412 | false |
Lochlan/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py | 119 | 23727 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import datetime
import StringIO
from .bugzilla import Bugzilla, BugzillaQueries, EditUsersParser
from webkitpy.common.config import urls
from webkitpy.common.config.committers import Reviewer, Committer, Contributor, CommitterList
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.web_mock import MockBrowser
from webkitpy.thirdparty.mock import Mock
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BugzillaTest(unittest.TestCase):
_example_attachment = '''
<attachment
isobsolete="1"
ispatch="1"
isprivate="0"
>
<attachid>33721</attachid>
<date>2009-07-29 10:23 PDT</date>
<desc>Fixed whitespace issue</desc>
<filename>patch</filename>
<type>text/plain</type>
<size>9719</size>
<attacher>[email protected]</attacher>
<flag name="review"
id="17931"
status="+"
setter="[email protected]"
/>
<flag name="commit-queue"
id="17932"
status="+"
setter="[email protected]"
/>
</attachment>
'''
_expected_example_attachment_parsing = {
'attach_date': datetime.datetime(2009, 07, 29, 10, 23),
'bug_id' : 100,
'is_obsolete' : True,
'is_patch' : True,
'id' : 33721,
'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
'name' : "Fixed whitespace issue",
'type' : "text/plain",
'review' : '+',
'reviewer_email' : '[email protected]',
'commit-queue' : '+',
'committer_email' : '[email protected]',
'attacher_email' : '[email protected]',
}
def test_url_creation(self):
# FIXME: These would be all better as doctests
bugs = Bugzilla()
self.assertIsNone(bugs.bug_url_for_bug_id(None))
self.assertIsNone(bugs.short_bug_url_for_bug_id(None))
self.assertIsNone(bugs.attachment_url_for_id(None))
def test_parse_bug_id(self):
# Test that we can parse the urls we produce.
bugs = Bugzilla()
self.assertEqual(12345, urls.parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345)))
self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
_bug_xml = """
<bug>
<bug_id>32585</bug_id>
<creation_ts>2009-12-15 15:17 PST</creation_ts>
<short_desc>bug to test webkit-patch's and commit-queue's failures</short_desc>
<delta_ts>2009-12-27 21:04:50 PST</delta_ts>
<reporter_accessible>1</reporter_accessible>
<cclist_accessible>1</cclist_accessible>
<classification_id>1</classification_id>
<classification>Unclassified</classification>
<product>WebKit</product>
<component>Tools / Tests</component>
<version>528+ (Nightly build)</version>
<rep_platform>PC</rep_platform>
<op_sys>Mac OS X 10.5</op_sys>
<bug_status>NEW</bug_status>
<priority>P2</priority>
<bug_severity>Normal</bug_severity>
<target_milestone>---</target_milestone>
<everconfirmed>1</everconfirmed>
<reporter name="Eric Seidel">[email protected]</reporter>
<assigned_to name="Nobody">[email protected]</assigned_to>
<cc>[email protected]</cc>
<cc>[email protected]</cc>
<long_desc isprivate="0">
<who name="Eric Seidel">[email protected]</who>
<bug_when>2009-12-15 15:17:28 PST</bug_when>
<thetext>bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.</thetext>
</long_desc>
<attachment
isobsolete="0"
ispatch="1"
isprivate="0"
>
<attachid>45548</attachid>
<date>2009-12-27 23:51 PST</date>
<desc>Patch</desc>
<filename>bug-32585-20091228005112.patch</filename>
<type>text/plain</type>
<size>10882</size>
<attacher>[email protected]</attacher>
<token>1261988248-dc51409e9c421a4358f365fa8bec8357</token>
<data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09
removed-because-it-was-really-long
ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==
</data>
<flag name="review"
id="27602"
status="?"
setter="[email protected]"
/>
</attachment>
</bug>
"""
_single_bug_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd">
<bugzilla version="3.2.3"
urlbase="https://bugs.webkit.org/"
maintainer="[email protected]"
exporter="[email protected]"
>
%s
</bugzilla>
""" % _bug_xml
_expected_example_bug_parsing = {
"id" : 32585,
"title" : u"bug to test webkit-patch's and commit-queue's failures",
"cc_emails" : ["[email protected]", "[email protected]"],
"reporter_email" : "[email protected]",
"assigned_to_email" : "[email protected]",
"bug_status": "NEW",
"attachments" : [{
"attach_date": datetime.datetime(2009, 12, 27, 23, 51),
'name': u'Patch',
'url' : "https://bugs.webkit.org/attachment.cgi?id=45548",
'is_obsolete': False,
'review': '?',
'is_patch': True,
'attacher_email': '[email protected]',
'bug_id': 32585,
'type': 'text/plain',
'id': 45548
}],
"comments" : [{
'comment_date': datetime.datetime(2009, 12, 15, 15, 17, 28),
'comment_email': '[email protected]',
'text': """bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.""",
}]
}
# FIXME: This should move to a central location and be shared by more unit tests.
def _assert_dictionaries_equal(self, actual, expected):
# Make sure we aren't parsing more or less than we expect
self.assertItemsEqual(actual.keys(), expected.keys())
for key, expected_value in expected.items():
self.assertEqual(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
def test_parse_bug_dictionary_from_xml(self):
bug = Bugzilla()._parse_bug_dictionary_from_xml(self._single_bug_xml)
self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing)
_sample_multi_bug_xml = """
<bugzilla version="3.2.3" urlbase="https://bugs.webkit.org/" maintainer="[email protected]" exporter="[email protected]">
%s
%s
</bugzilla>
""" % (_bug_xml, _bug_xml)
def test_parse_bugs_from_xml(self):
bugzilla = Bugzilla()
bugs = bugzilla._parse_bugs_from_xml(self._sample_multi_bug_xml)
self.assertEqual(len(bugs), 2)
self.assertEqual(bugs[0].id(), self._expected_example_bug_parsing['id'])
bugs = bugzilla._parse_bugs_from_xml("")
self.assertEqual(len(bugs), 0)
# This could be combined into test_bug_parsing later if desired.
def test_attachment_parsing(self):
bugzilla = Bugzilla()
soup = BeautifulSoup(self._example_attachment)
attachment_element = soup.find("attachment")
attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
self.assertTrue(attachment)
self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing)
_sample_attachment_detail_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>
Attachment 41073 Details for Bug 27314</title>
<link rel="Top" href="https://bugs.webkit.org/">
<link rel="Up" href="show_bug.cgi?id=27314">
"""
def test_attachment_detail_bug_parsing(self):
bugzilla = Bugzilla()
self.assertEqual(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
def test_add_cc_to_bug(self):
bugzilla = Bugzilla()
bugzilla.browser = MockBrowser()
bugzilla.authenticate = lambda: None
expected_logs = "Adding ['[email protected]'] to the CC list for bug 42\n"
OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["[email protected]"]], expected_logs=expected_logs)
def _mock_control_item(self, name):
mock_item = Mock()
mock_item.name = name
return mock_item
def _mock_find_control(self, item_names=[], selected_index=0):
mock_control = Mock()
mock_control.items = [self._mock_control_item(name) for name in item_names]
mock_control.value = [item_names[selected_index]] if item_names else None
return lambda name, type: mock_control
def _assert_reopen(self, item_names=None, selected_index=None, extra_logs=None):
bugzilla = Bugzilla()
bugzilla.browser = MockBrowser()
bugzilla.authenticate = lambda: None
mock_find_control = self._mock_find_control(item_names, selected_index)
bugzilla.browser.find_control = mock_find_control
expected_logs = "Re-opening bug 42\n['comment']\n"
if extra_logs:
expected_logs += extra_logs
OutputCapture().assert_outputs(self, bugzilla.reopen_bug, [42, ["comment"]], expected_logs=expected_logs)
def test_reopen_bug(self):
self._assert_reopen(item_names=["REOPENED", "RESOLVED", "CLOSED"], selected_index=1)
self._assert_reopen(item_names=["UNCONFIRMED", "RESOLVED", "CLOSED"], selected_index=1)
extra_logs = "Did not reopen bug 42, it appears to already be open with status ['NEW'].\n"
self._assert_reopen(item_names=["NEW", "RESOLVED"], selected_index=0, extra_logs=extra_logs)
def test_file_object_for_upload(self):
bugzilla = Bugzilla()
file_object = StringIO.StringIO()
unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
utf8_tor = unicode_tor.encode("utf-8")
self.assertEqual(bugzilla._file_object_for_upload(file_object), file_object)
self.assertEqual(bugzilla._file_object_for_upload(utf8_tor).read(), utf8_tor)
self.assertEqual(bugzilla._file_object_for_upload(unicode_tor).read(), utf8_tor)
def test_filename_for_upload(self):
bugzilla = Bugzilla()
mock_file = Mock()
mock_file.name = "foo"
self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo')
mock_timestamp = lambda: "now"
filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp)
self.assertEqual(filename, "bug-1234-now.patch")
def test_commit_queue_flag(self):
bugzilla = Bugzilla()
bugzilla.committers = CommitterList(reviewers=[Reviewer("WebKit Reviewer", "[email protected]")],
committers=[Committer("WebKit Committer", "[email protected]")],
contributors=[Contributor("WebKit Contributor", "[email protected]")])
def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
bugzilla.username = username
capture = OutputCapture()
capture.capture_output()
try:
self.assertEqual(bugzilla._commit_queue_flag(mark_for_landing=mark_for_landing, mark_for_commit_queue=mark_for_commit_queue), expected)
finally:
capture.restore_output()
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='[email protected]')
def test__check_create_bug_response(self):
bugzilla = Bugzilla()
title_html_bugzilla_323 = "<title>Bug 101640 Submitted</title>"
self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_323), '101640')
title_html_bugzilla_425 = "<title>Bug 101640 Submitted – Testing webkit-patch again</title>"
self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_425), '101640')
class BugzillaQueriesTest(unittest.TestCase):
_sample_request_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Request Queue</title>
</head>
<body>
<h3>Flag: review</h3>
<table class="requests" cellspacing="0" cellpadding="4" border="1">
<tr>
<th>Requester</th>
<th>Requestee</th>
<th>Bug</th>
<th>Attachment</th>
<th>Created</th>
</tr>
<tr>
<td>Shinichiro Hamaji <hamaji@chromium.org></td>
<td></td>
<td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
<td><a href="attachment.cgi?id=40511&action=review">
40511: Patch v0</a></td>
<td>2009-10-02 04:58 PST</td>
</tr>
<tr>
<td>Zan Dobersek <zandobersek@gmail.com></td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40722&action=review">
40722: Media controls, the simple approach</a></td>
<td>2009-10-06 09:13 PST</td>
</tr>
<tr>
<td>Zan Dobersek <zandobersek@gmail.com></td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40723&action=review">
40723: Adjust the media slider thumb size</a></td>
<td>2009-10-06 09:15 PST</td>
</tr>
</table>
</body>
</html>
"""
_sample_quip_page = u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Bugzilla Quip System</title>
</head>
<body>
<h2>
Existing quips:
</h2>
<ul>
<li>Everything should be made as simple as possible, but not simpler. - Albert Einstein</li>
<li>Good artists copy. Great artists steal. - Pablo Picasso</li>
<li>\u00e7gua mole em pedra dura, tanto bate at\u008e que fura.</li>
</ul>
</body>
</html>
"""
def _assert_result_count(self, queries, html, count):
self.assertEqual(queries._parse_result_count(html), count)
def test_parse_result_count(self):
queries = BugzillaQueries(None)
# Pages with results, always list the count at least twice.
self._assert_result_count(queries, '<span class="bz_result_count">314 bugs found.</span><span class="bz_result_count">314 bugs found.</span>', 314)
self._assert_result_count(queries, '<span class="bz_result_count">Zarro Boogs found.</span>', 0)
self._assert_result_count(queries, '<span class="bz_result_count">\n \nOne bug found.</span>', 1)
self.assertRaises(Exception, queries._parse_result_count, ['Invalid'])
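# Illustrative sketch (assumption, not the parser's actual code): the
# behaviour asserted above matches a count extractor along these lines:
#
#     >>> import re
#     >>> def count_from_text(text):
#     ...     if 'Zarro Boogs' in text:
#     ...         return 0
#     ...     if 'One bug' in text:
#     ...         return 1
#     ...     return int(re.search(r'(\d+) bugs', text).group(1))
#     >>> count_from_text('314 bugs found.')
#     314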
def test_request_page_parsing(self):
queries = BugzillaQueries(None)
self.assertEqual([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page))
def test_quip_page_parsing(self):
queries = BugzillaQueries(None)
expected_quips = ["Everything should be made as simple as possible, but not simpler. - Albert Einstein", "Good artists copy. Great artists steal. - Pablo Picasso", u"\u00e7gua mole em pedra dura, tanto bate at\u008e que fura."]
self.assertEqual(expected_quips, queries._parse_quips(self._sample_quip_page))
def test_load_query(self):
queries = BugzillaQueries(Mock())
queries._load_query("request.cgi?action=queue&type=review&group=type")
class EditUsersParserTest(unittest.TestCase):
_example_user_results = """
<div id="bugzilla-body">
<p>1 user found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr>
<td >
<a href="editusers.cgi?action=edit&userid=1234&matchvalue=login_name&groupid=&grouprestrict=&matchtype=substr&matchstr=abarth%40webkit.org">
abarth@webkit.org
</a>
</td>
<td >
Adam Barth
</td>
<td >
<a href="editusers.cgi?action=activity&userid=1234&matchvalue=login_name&groupid=&grouprestrict=&matchtype=substr&matchstr=abarth%40webkit.org">
View
</a>
</td>
</tr>
</table>
"""
_example_empty_user_results = """
<div id="bugzilla-body">
<p>0 users found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr><td colspan="3" align="center"><i><none></i></td></tr>
</table>
"""
def _assert_login_userid_pairs(self, results_page, expected_logins):
parser = EditUsersParser()
logins = parser.login_userid_pairs_from_edit_user_results(results_page)
self.assertEqual(logins, expected_logins)
def test_logins_from_editusers_results(self):
self._assert_login_userid_pairs(self._example_user_results, [("[email protected]", 1234)])
self._assert_login_userid_pairs(self._example_empty_user_results, [])
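# Illustrative sketch (assumption): each pair comes from a result row's
# anchor -- the login from the link text, the userid from the "userid"
# query parameter of the href, e.g.
#
#     >>> from urlparse import urlparse, parse_qs   # Python 2, as used here
#     >>> href = "editusers.cgi?action=edit&userid=1234&matchtype=substr"
#     >>> int(parse_qs(urlparse(href).query)["userid"][0])
#     1234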
_example_user_page = """<table class="main"><tr>
<th><label for="login">Login name:</label></th>
<td>eric@webkit.org
</td>
</tr>
<tr>
<th><label for="name">Real name:</label></th>
<td>Eric Seidel
</td>
</tr>
<tr>
<th>Group access:</th>
<td>
<table class="groups">
<tr>
</tr>
<tr>
<th colspan="2">User is a member of these groups</th>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_7"
name="group_7"
value="1" checked="checked" /></td>
<td class="groupname">
<label for="group_7">
<strong>canconfirm:</strong>
Can confirm a bug.
</label>
</td>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_6"
name="group_6"
value="1" /></td>
<td class="groupname">
<label for="group_6">
<strong>editbugs:</strong>
Can edit all aspects of any bug.
</label>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Product responsibilities:</th>
<td>
<em>none</em>
</td>
</tr>
</table>"""
def test_user_dict_from_edit_user_page(self):
parser = EditUsersParser()
user_dict = parser.user_dict_from_edit_user_page(self._example_user_page)
expected_user_dict = {u'login': u'[email protected]', u'groups': set(['canconfirm']), u'name': u'Eric Seidel'}
self.assertEqual(expected_user_dict, user_dict)
| bsd-3-clause | 71,401,643,696,353,624 | 41.521505 | 235 | 0.62039 | false |
zookeepr/zookeepr | zkpylons/tests/functional/test_ceiling.py | 3 | 6694 | from routes import url_for
from zk.model import Product
from BeautifulSoup import BeautifulSoup
from .fixtures import CeilingFactory, ProductCategoryFactory, ProductFactory, PersonFactory, RoleFactory, RegistrationFactory, InvoiceFactory, InvoiceItemFactory, CompletePersonFactory
from .utils import do_login
from .crud_helper import CrudHelper
class TestCeiling(CrudHelper):
def test_new(self, app, db_session):
cats = [ProductCategoryFactory() for i in range(2)]
products = [ProductFactory(category=cats[0]) for i in range(4)] \
+ [ProductFactory(category=cats[1]) for i in range(3)]
data = {
'name' : 'test_new',
'max_sold' : '23',
'available_from' : '01/02/1945',
'available_until' : '02/03/1956',
'products' : [products[0].id, products[3].id, products[6].id],
}
def extra_form_check(form):
assert len(form.fields['ceiling.products'][0].options) == len(products)
def extra_data_check(new):
# Datetime object and multiple products are too complex for the default check
# So we disable the default data check and replace it with this
assert new.parent is None
assert new.name == 'test_new'
assert new.max_sold == 23
assert new.available_from.date().isoformat() == '1945-02-01'
assert new.available_until.date().isoformat() == '1956-03-02'
selected_ids = data['products']
assert len(new.products) == len(selected_ids)
for pid in selected_ids:
p = Product.find_by_id(pid)
assert p in new.products
CrudHelper.test_new(self, app, db_session, data=data, extra_form_check = extra_form_check, do_data_check=False, extra_data_check = extra_data_check)
# TODO: Invalid content, different date styles
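# Illustrative note (sketch): the form fields above use dd/mm/yyyy, so
# '01/02/1945' round-trips to the ISO date '1945-02-01':
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('01/02/1945', '%d/%m/%Y').date().isoformat()
#     '1945-02-01'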
def test_view(self, app, db_session):
# These are the numbers of special-case people in a given ceiling group,
# such as the number of under-18s or of people with special dietary needs.
pc1 = ProductCategoryFactory()
prods = [ProductFactory(category=pc1) for i in range(2)]
ceil = CeilingFactory(max_sold=4223, available_from='2012-12-01', available_until='1901-06-23', products=prods)
peeps = [CompletePersonFactory() for i in range(3)]
reg1 = RegistrationFactory(person=peeps[0], diet="Wabbits", over18=True)
reg2 = RegistrationFactory(person=peeps[1], diet="Wrascles", over18=False)
reg3 = RegistrationFactory(person=peeps[2], diet="", over18=False)
# need a new invoice item for each invoice
for peep in peeps:
InvoiceFactory(person=peep, items=[InvoiceItemFactory(product=p) for p in prods])
db_session.commit()
expected = [
ceil.name, str(ceil.max_sold),
ceil.available_from.strftime("%d/%m/%y"), ceil.available_until.strftime("%d/%m/%y"),
] + [p.description for p in ceil.products]
resp = CrudHelper.test_view(self, app, db_session, expected=expected, target=ceil)
print resp
soup = BeautifulSoup(resp.body)
def process_table(name):
table = soup.find(id=name).findNext('table')
return [row.findAll('td') for row in table.find('tbody').findAll('tr')]
dietspec_paid = process_table("diet_special_paid")
assert len(dietspec_paid) == 1
assert dietspec_paid[0][0].find(text="No entries")
dietspec_unpaid = process_table("diet_special_invoiced")
assert len(dietspec_unpaid) == 2
for pers in [peeps[0], peeps[1]]:
assert len(filter(None, [c.find(text=pers.fullname) for r in dietspec_unpaid for c in r])) == 1
diet_paid = process_table("diet_paid")
assert len(diet_paid) == 1
assert diet_paid[0][0].find(text="No entries")
diet_unpaid = process_table("diet_invoiced")
assert len(diet_unpaid) == 2
for pers in [peeps[0], peeps[1]]:
assert len(filter(None, [c.find(text=pers.fullname) for r in diet_unpaid for c in r])) == 1
u18_paid = process_table("under18_paid")
assert len(u18_paid) == 1
assert u18_paid[0][0].find(text="No entries")
u18_unpaid = process_table("under18_invoiced")
assert len(u18_unpaid) == 2*2
for pers in [peeps[1], peeps[2]]:
assert len(filter(None, [c.find(text=pers.fullname) for r in u18_unpaid for c in r])) == 2
def test_edit(self, app, db_session):
cats = [ProductCategoryFactory() for i in range(2)]
products = [ProductFactory(category=cats[0]) for i in range(4)] \
+ [ProductFactory(category=cats[1]) for i in range(3)]
c = CeilingFactory(max_sold=4223, available_from='2012-12-01', available_until='1901-06-23', products=[products[0], products[4], products[1]])
initial_values = {
'max_sold' : str(c.max_sold),
'available_from' : '01/12/12',
'available_until' : '23/06/01',
'products' : [str(products[i].id) for i in (0, 1, 4)],
}
new_values = {
'name' : 'test_new',
'max_sold' : '23',
'available_from' : '01/02/1945',
'available_until' : '02/03/1956',
'products' : [products[0].id, products[3].id, products[6].id],
}
db_session.commit()
def extra_form_check(form):
assert len(form.fields['ceiling.products'][0].options) == 7
def extra_data_check(new):
# Datetime object and multiple products are too complex for the default check
# So we disable the default data check and replace it with this
assert new.parent is None
assert new.name == 'test_new'
assert new.max_sold == 23
assert new.available_from.date().isoformat() == '1945-02-01'
assert new.available_until.date().isoformat() == '1956-03-02'
selected_ids = new_values['products']
assert len(new.products) == len(selected_ids)
for pid in selected_ids:
p = Product.find_by_id(pid)
assert p in new.products
# TODO: Invalid content, different date styles
CrudHelper.test_edit(self, app, db_session, initial_values=initial_values, new_values=new_values, extra_form_check=extra_form_check, do_data_check=False, extra_data_check=extra_data_check, pageid=c.id)
| gpl-2.0 | -8,251,438,130,191,793,000 | 43.331126 | 209 | 0.591724 | false |
CMLL/taiga-back | taiga/front/templatetags/functions.py | 14 | 1300 | # Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django_jinja import library
from django_sites import get_by_id as get_site_by_id
from taiga.front.urls import urls
register = library.Library()
@register.global_function(name="resolve_front_url")
def resolve(type, *args):
site = get_site_by_id("front")
url_tmpl = "{scheme}//{domain}{url}"
scheme = site.scheme and "{0}:".format(site.scheme) or ""
url = urls[type].format(*args)
return url_tmpl.format(scheme=scheme, domain=site.domain, url=url)
| agpl-3.0 | 8,873,625,767,029,446,000 | 37.176471 | 74 | 0.738059 | false |
ehashman/oh-mainline | vendor/packages/Django/django/contrib/gis/tests/relatedapp/tests.py | 198 | 14731 | from __future__ import absolute_import
from datetime import date
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
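# Illustrative note (sketch): an extent is the 4-tuple
# (xmin, ymin, xmax, ymax) over the points involved; with the fixture
# coordinates listed in test04b below, the all-cities extent works out to:
#
#     >>> pts = [(-104.528056, 33.387222), (-97.516111, 33.058333),
#     ...        (-79.460734, 40.18476), (-96.801611, 32.782057),
#     ...        (-95.363151, 29.763374)]
#     >>> xs, ys = zip(*pts)
#     >>> (min(xs), min(ys), max(xs), max(ys))
#     (-104.528056, 29.763374, -79.460734, 40.18476)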
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point number corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS sorts points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share the same point, Collect doesn't
# consolidate -- that's why there are 4 points in the MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
| agpl-3.0 | 2,062,797,297,182,440,000 | 47.778146 | 155 | 0.638382 | false |
alxgu/ansible | lib/ansible/plugins/action/dellos9.py | 38 | 4095 | #
# (c) 2016 Red Hat Inc.
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.dellos9.dellos9 import dellos9_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True if self._task.action == 'dellos9_config' else False
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(dellos9_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'dellos9'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
# make sure we are in the right CLI context, which should be
# enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
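# Illustrative note (sketch): the prompt check above treats any prompt
# ending in ')#' as a configuration context; prompt strings like
# 'switch(conf)#' vs. 'switch#' are assumptions for illustration:
#
#     >>> 'switch(conf)#'.strip().endswith(')#')
#     True
#     >>> 'switch#'.strip().endswith(')#')
#     False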
| gpl-3.0 | -7,493,177,357,316,088,000 | 43.032258 | 122 | 0.658364 | false |
smilusingjavascript/blink | Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win.py | 50 | 5429 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import shlex
from webkitpy.layout_tests.breakpad.dump_reader import DumpReader
_log = logging.getLogger(__name__)
class DumpReaderWin(DumpReader):
"""DumpReader for windows breakpad."""
def __init__(self, host, build_dir):
super(DumpReaderWin, self).__init__(host, build_dir)
self._cdb_available = None
def check_is_functional(self):
return self._check_cdb_available()
def _file_extension(self):
return 'txt'
def _get_pid_from_dump(self, dump_file):
with self._host.filesystem.open_text_file_for_reading(dump_file) as f:
crash_keys = dict([l.split(':', 1) for l in f.read().splitlines()])
if 'pid' in crash_keys:
return crash_keys['pid']
return None
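# Illustrative sketch (assumption about the .txt dump format): the reader
# above expects "key:value" lines, so a dump containing "pid:1234" and
# "ptype:renderer" parses like this:
#
#     >>> lines = 'pid:1234\nptype:renderer'.splitlines()
#     >>> crash_keys = dict([l.split(':', 1) for l in lines])
#     >>> crash_keys['pid']
#     '1234'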
def _get_stack_from_dump(self, dump_file):
minidump = dump_file[:-3] + 'dmp'
cmd = [self._cdb_path, '-y', self._build_dir, '-c', '.lines;.ecxr;k30;q', '-z', minidump]
try:
stack = self._host.executive.run_command(cmd)
except:
_log.warning('Failed to execute "%s"' % ' '.join(cmd))
else:
return stack
return None
def _find_depot_tools_path(self):
"""Attempt to find depot_tools location in PATH."""
for i in os.environ['PATH'].split(os.pathsep):
if os.path.isfile(os.path.join(i, 'gclient')):
return i
def _check_cdb_available(self):
"""Checks whether we can use cdb to symbolize minidumps."""
if self._cdb_available != None:
return self._cdb_available
CDB_LOCATION_TEMPLATES = [
'%s\\Debugging Tools For Windows',
'%s\\Debugging Tools For Windows (x86)',
'%s\\Debugging Tools For Windows (x64)',
'%s\\Windows Kits\\8.0\\Debuggers\\x86',
'%s\\Windows Kits\\8.0\\Debuggers\\x64',
'%s\\Windows Kits\\8.1\\Debuggers\\x86',
'%s\\Windows Kits\\8.1\\Debuggers\\x64',
]
program_files_directories = ['C:\\Program Files']
program_files = os.environ.get('ProgramFiles')
if program_files:
program_files_directories.append(program_files)
program_files = os.environ.get('ProgramFiles(x86)')
if program_files:
program_files_directories.append(program_files)
possible_cdb_locations = []
for template in CDB_LOCATION_TEMPLATES:
for program_files in program_files_directories:
possible_cdb_locations.append(template % program_files)
gyp_defines = os.environ.get('GYP_DEFINES', [])
if gyp_defines:
gyp_defines = shlex.split(gyp_defines)
if 'windows_sdk_path' in gyp_defines:
possible_cdb_locations.extend([
'%s\\Debuggers\\x86' % gyp_defines['windows_sdk_path'],
'%s\\Debuggers\\x64' % gyp_defines['windows_sdk_path'],
])
# Look in depot_tools win_toolchain too.
depot_tools = self._find_depot_tools_path()
if depot_tools:
win8sdk = os.path.join(depot_tools, 'win_toolchain', 'vs2013_files', 'win8sdk')
possible_cdb_locations.extend([
'%s\\Debuggers\\x86' % win8sdk,
'%s\\Debuggers\\x64' % win8sdk,
])
for cdb_path in possible_cdb_locations:
cdb = self._host.filesystem.join(cdb_path, 'cdb.exe')
try:
_ = self._host.executive.run_command([cdb, '-version'])
except:
pass
else:
self._cdb_path = cdb
self._cdb_available = True
return self._cdb_available
_log.warning("CDB is not installed; can't symbolize minidumps.")
_log.warning('')
self._cdb_available = False
return self._cdb_available
| bsd-3-clause | -3,563,751,862,695,588,400 | 38.919118 | 97 | 0.624609 | false |
heli522/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh().
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
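# Illustrative usage (sketch): with shift-invert mode, eigsh (defined
# below) can target eigenvalues near a shift sigma instead of the extremal
# ones; the matrix here is an assumption for illustration:
#
#     >>> import numpy as np
#     >>> from scipy.sparse import csc_matrix
#     >>> A = csc_matrix(np.diag([1.0, 2.0, 3.0, 4.0, 5.0]))
#     >>> vals, vecs = eigsh(A, k=2, sigma=2.1)  # the two values nearest 2.1
#     >>> np.sort(vals)
#     array([ 2.,  3.])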
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
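# Illustrative usage (sketch): callers can recover the partial result from
# a failed iteration; ``A`` and ``k`` here are placeholders:
#
#     try:
#         vals, vecs = eigsh(A, k=k)
#     except ArpackNoConvergence as err:
#         vals, vecs = err.eigenvalues, err.eigenvectors  # converged subset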
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
# sigma is not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
                            # last eigenvalue is complex: the imaginary part
                            # of the eigenvector has not been returned
                            # this can only happen if nreturned > k, so we'll
                            # throw out this case
nreturned -= 1
                            i += 1
                    # advance past the eigenvalue handled in this pass
                    i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
                            # last eigenvalue is complex: the imaginary part
                            # of the eigenvector has not been returned
                            # this can only happen if nreturned > k, so we'll
                            # throw out this case
nreturned -= 1
                            i += 1
                    # advance past the eigenvalue handled in this pass
                    i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
                # we got at most as many eigenvalues as we asked for
d = d[:nreturned]
z = z[:, :nreturned]
else:
                # we got one extra eigenvalue (likely one of a complex-conjugate pair)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns the eigenvalues with largest,
                    # smallest abs(imaginary part)
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
using a sparse LU-decopposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
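# A minimal usage sketch (hedged): SpLuInv factorizes M once in __init__, so
# repeated matvec calls cost only the triangular solves. The 5x5 diagonal
# matrix and the helper name below are hypothetical illustrations.
def _demo_spluinv():
    M = identity(5, format='csc') * 2.0  # splu expects CSC input
    op = SpLuInv(M)
    b = np.arange(1.0, 6.0)
    return op.matvec(b)  # == b / 2, reusing the single LU factorization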
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
    def __init__(self, M, ifunc=gmres, tol=0):
        self.M = M
        self.ifunc = ifunc
        # determine the dtype before using it below; a bare LinearOperator
        # may not carry a dtype attribute
        if hasattr(M, 'dtype'):
            dtype = M.dtype
        else:
            x = np.zeros(M.shape[1])
            dtype = (M * x).dtype
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = np.finfo(dtype).eps
        self.tol = tol
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
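# A minimal usage sketch (hedged): IterInv turns an iterative solver into an
# operator whose matvec approximately applies M^-1. The diagonal matrix below
# is a hypothetical illustration, and the `tol` keyword is assumed to match
# the scipy-era gmres signature used throughout this backport.
def _demo_iterinv():
    M = np.diag(np.arange(1.0, 6.0))
    op = IterInv(M, tol=1e-10)
    b = np.ones(5)
    # approximately solves M x = b, i.e. returns 1 / diag(M)
    return op.matvec(b)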
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
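# A minimal usage sketch (hedged): IterOpInv applies [A - sigma*M]^-1 through
# an iterative solver; passing M=None means M is the identity. The matrix and
# shift below are hypothetical illustrations.
def _demo_iteropinv():
    A = _aslinearoperator_with_dtype(np.diag(np.arange(1.0, 6.0)))
    op = IterOpInv(A, None, sigma=0.5)
    b = np.ones(5)
    return op.matvec(b)  # approximately solves (A - 0.5*I) x = b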
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
        if isspmatrix_csr(M) and symmetric:
            # for symmetric M, M.T equals M but is already in the CSC form
            # that the sparse LU factorization expects (no conversion cost)
            M = M.T
        return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
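# A minimal sketch of the dispatch above (hedged; the matrix is a hypothetical
# illustration): dense M is inverted via dense LU, sparse M via sparse LU, and
# anything else via the iterative IterInv wrapper.
def _demo_inv_matvec():
    M = np.diag(np.arange(1.0, 6.0))
    minv = get_inv_matvec(M)  # dense path: LuInv
    return minv(np.ones(5))  # == 1 / diag(M)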
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
        # M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
            # subtract sigma from the diagonal in place
            A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
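# A minimal sketch (hedged) of the dense M=None path above: the shifted
# inverse is built by LU-factoring A - sigma*I. Values are illustrative only.
def _demo_opinv_matvec():
    A = np.diag(np.arange(1.0, 6.0))
    opinv = get_OPinv_matvec(A, None, sigma=0.5)
    b = np.ones(5)
    return opinv(b)  # solves (A - 0.5*I) x = b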
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
    OPpart : 'r' or 'i'
        See notes in sigma, above.
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
            # standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
            # general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
        # sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
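# A hedged, illustrative example of shift-invert with the wrapper above: with
# sigma set, which='LM' selects the transformed eigenvalues
# w'[i] = 1/(w[i]-sigma) of largest magnitude, i.e. the eigenvalues of A
# closest to sigma. The diagonal test matrix is hypothetical.
def _demo_eigs_shift_invert():
    A = np.diag(np.arange(1.0, 11.0))
    vals, vecs = _eigs(A, k=2, sigma=3.1, which='LM')
    return np.sort(vals.real)  # approximately [3., 4.]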
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
            # standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
            # general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
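# A hedged, illustrative example of the 'normal' shift-invert mode documented
# above: which='LM' on the transformed spectrum w'[i] = 1/(w[i]-sigma) returns
# the eigenvalues of A nearest sigma. The diagonal test matrix is hypothetical.
def _demo_eigsh_shift_invert():
    A = np.diag(np.arange(1.0, 11.0))
    vals, vecs = _eigsh(A, k=2, sigma=6.2, which='LM', mode='normal')
    return np.sort(vals)  # approximately [6., 7.]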
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
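# A hedged sanity check for the naive SVD above (illustrative random matrix):
# the k leading singular values should match those from a dense SVD.
def _demo_svds():
    rng = np.random.RandomState(0)
    A = rng.rand(9, 6)
    u, s, vh = _svds(A, k=3)
    s_dense = np.linalg.svd(A, compute_uv=False)[:3]
    return np.allclose(np.sort(s)[::-1], s_dense)  # expected: True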
# check if backport is actually needed:
if LooseVersion(scipy.version.version) >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause | -2,765,210,434,764,842,000 | 38.826167 | 79 | 0.535728 | false |
Michaelhobo/ee149-final-project | mavlink/pymavlink/tools/mavextract.py | 41 | 2555 | #!/usr/bin/env python
'''
extract one mode type from a log
'''
import sys, time, os, struct
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--robust", action='store_true', help="Enable robust parsing (skip over bad data)")
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--mode", default='auto', help="mode to extract")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
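# Example invocation (hedged; the log filename is hypothetical):
#   mavextract.py --mode auto flight1.tlog
# writes auto1.tlog (or auto1.bin for dataflash logs) next to the input,
# containing only the packets logged while the vehicle was in AUTO mode.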
from pymavlink import mavutil
def process(filename):
'''process one logfile'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps,
robust_parsing=args.robust)
ext = os.path.splitext(filename)[1]
isbin = ext in ['.bin', '.BIN']
islog = ext in ['.log', '.LOG']
output = None
count = 1
dirname = os.path.dirname(filename)
if isbin or islog:
extension = "bin"
else:
extension = "tlog"
    file_header = b''  # bytes: get_msgbuf() returns raw message buffers
while True:
m = mlog.recv_match()
if m is None:
break
if (isbin or islog) and m.get_type() in ["FMT", "PARM", "CMD"]:
file_header += m.get_msgbuf()
if (isbin or islog) and m.get_type() == 'MSG' and m.Message.startswith("Ardu"):
file_header += m.get_msgbuf()
        if m.get_type() in ['PARAM_VALUE', 'MISSION_ITEM']:
            timestamp = getattr(m, '_timestamp', None)
            # struct 'Q' requires an integer microsecond timestamp
            file_header += struct.pack('>Q', int(timestamp*1.0e6)) + m.get_msgbuf()
if not mavutil.evaluate_condition(args.condition, mlog.messages):
continue
if mlog.flightmode.upper() == args.mode.upper():
if output is None:
path = os.path.join(dirname, "%s%u.%s" % (args.mode, count, extension))
count += 1
print("Creating %s" % path)
output = open(path, mode='wb')
output.write(file_header)
else:
if output is not None:
output.close()
output = None
if output and m.get_type() != 'BAD_DATA':
timestamp = getattr(m, '_timestamp', None)
if not isbin:
                output.write(struct.pack('>Q', int(timestamp*1.0e6)))
output.write(m.get_msgbuf())
for filename in args.logs:
process(filename)
| gpl-3.0 | -3,411,939,758,737,294,000 | 31.75641 | 116 | 0.580039 | false |