repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values)
---|---|---|---|---|---
tedelhourani/ansible | lib/ansible/modules/system/setup.py | 49 | 7733 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: setup
version_added: historical
short_description: Gathers facts about remote hosts
options:
gather_subset:
version_added: "2.1"
description:
- "if supplied, restrict the additional facts collected to the given subset.
Possible values: all, min, hardware, network, virtual, ohai, and
facter. Can specify a list of values to specify a larger subset.
Values can also be used with an initial C(!) to specify that
that specific subset should not be collected. For instance:
!hardware, !network, !virtual, !ohai, !facter. If !all is specified
then only the min subset is collected. To avoid collecting even the
min subset, specify !all and !min subsets. To collect only specific facts,
use !all, !min, and specify the particular fact subsets.
Use the filter parameter if you do not want to display some collected
facts."
required: false
default: 'all'
gather_timeout:
version_added: "2.2"
description:
- "Set the default timeout in seconds for individual fact gathering"
required: false
default: 10
filter:
version_added: "1.1"
description:
- if supplied, only return facts that match this shell-style (fnmatch) wildcard.
required: false
default: '*'
fact_path:
version_added: "1.3"
description:
- path used for local ansible facts (*.fact) - files in this dir
will be run (if executable) and their results added to ansible_local facts;
if a file is not executable it is read instead. Check notes for Windows options. (from 2.1 on)
File/results format can be JSON or INI-format.
required: false
default: '/etc/ansible/facts.d'
description:
- This module is automatically called by playbooks to gather useful
variables about remote hosts that can be used in playbooks. It can also be
executed directly by C(/usr/bin/ansible) to check what variables are
available to a host. Ansible provides many I(facts) about the system,
automatically.
- This module is also supported for Windows targets.
notes:
- More ansible facts will be added with successive releases. If I(facter) or
I(ohai) are installed, variables from these programs will also be snapshotted
into the JSON file for usage in templating. These variables are prefixed
with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
bubbled up to the caller. Using the ansible facts and choosing to not
install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your
remote systems. (See also M(facter) and M(ohai).)
- The filter option filters only the first level subkey below ansible_facts.
- If the target host is Windows, you will not currently have the ability to use
C(filter) as this is provided by a simpler implementation of the module.
- If the target host is Windows you can now use C(fact_path). Make sure that this path
exists on the target host. Files in this path MUST be PowerShell scripts (``*.ps1``) and
their output must be formattable in JSON (Ansible will take care of this). Test the
output of your scripts.
This option was added in Ansible 2.1.
- This module is also supported for Windows targets.
author:
- "Ansible Core Team"
- "Michael DeHaan"
- "David O'Brien @david_obrien davidobrien1985"
'''
EXAMPLES = """
# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
# ansible all -m setup --tree /tmp/facts
# Display only facts regarding memory found by ansible on all hosts and output them.
# ansible all -m setup -a 'filter=ansible_*_mb'
# Display only facts returned by facter.
# ansible all -m setup -a 'filter=facter_*'
# Collect only facts returned by facter.
# ansible all -m setup -a 'gather_subset=!all,!any,facter'
# Display only facts about certain interfaces.
# ansible all -m setup -a 'filter=ansible_eth[0-2]'
# Restrict additional gathered facts to network and virtual (includes default minimum facts)
# ansible all -m setup -a 'gather_subset=network,virtual'
# Collect only network and virtual (excludes default minimum facts)
# ansible all -m setup -a 'gather_subset=!all,!any,network,virtual'
# Do not call puppet facter or ohai even if present.
# ansible all -m setup -a 'gather_subset=!facter,!ohai'
# Only collect the default minimum amount of facts:
# ansible all -m setup -a 'gather_subset=!all'
# Collect no facts, even the default minimum subset of facts:
# ansible all -m setup -a 'gather_subset=!all,!min'
# Display facts from Windows hosts with custom facts stored in C(C:\\custom_facts).
# ansible windows -m setup -a "fact_path='c:\\custom_facts'"
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts import ansible_collector
from ansible.module_utils.facts import default_collectors
def main():
module = AnsibleModule(
argument_spec=dict(
gather_subset=dict(default=["all"], required=False, type='list'),
gather_timeout=dict(default=10, required=False, type='int'),
filter=dict(default="*", required=False),
fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
),
supports_check_mode=True,
)
gather_subset = module.params['gather_subset']
gather_timeout = module.params['gather_timeout']
filter_spec = module.params['filter']
# TODO: this mimics existing behavior where gather_subset=["!all"] actually means
# to collect nothing except for the below list
# TODO: decide what '!all' means, I lean towards making it mean none, but likely needs
# some tweaking on how gather_subset operations are performed
minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
'distribution', 'dns', 'env', 'fips', 'local', 'lsb',
'pkg_mgr', 'platform', 'python', 'selinux',
'service_mgr', 'ssh_pub_keys', 'user'])
all_collector_classes = default_collectors.collectors
# rename namespace_name to root_key?
namespace = PrefixFactNamespace(namespace_name='ansible',
prefix='ansible_')
fact_collector = \
ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
namespace=namespace,
filter_spec=filter_spec,
gather_subset=gather_subset,
gather_timeout=gather_timeout,
minimal_gather_subset=minimal_gather_subset)
facts_dict = fact_collector.collect(module=module)
module.exit_json(ansible_facts=facts_dict)
if __name__ == '__main__':
main()
| gpl-3.0
stacywsmith/ansible | test/units/parsing/test_dataloader.py | 47 | 3823 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY3
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.errors import yaml_strings
from ansible.parsing.dataloader import DataLoader
class TestDataLoader(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
def tearDown(self):
pass
@patch.object(DataLoader, '_get_file_contents')
def test_parse_json_from_file(self, mock_def):
mock_def.return_value = (b"""{"a": 1, "b": 2, "c": 3}""", True)
output = self._loader.load_from_file('dummy_json.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_yaml_from_file(self, mock_def):
mock_def.return_value = (b"""
a: 1
b: 2
c: 3
""", True)
output = self._loader.load_from_file('dummy_yaml.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_fail_from_file(self, mock_def):
mock_def.return_value = (b"""
TEXT:
***
NOT VALID
""", True)
self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
@patch('ansible.errors.AnsibleError._get_error_lines_from_file')
@patch.object(DataLoader, '_get_file_contents')
def test_tab_error(self, mock_def, mock_get_error_lines):
mock_def.return_value = (u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""", True)
mock_get_error_lines.return_value = ('''\tblip: baz''', '''..foo: bar''')
with self.assertRaises(AnsibleParserError) as cm:
self._loader.load_from_file('dummy_yaml_text.txt')
self.assertIn(yaml_strings.YAML_COMMON_LEADING_TAB_ERROR, str(cm.exception))
self.assertIn('foo: bar', str(cm.exception))
class TestDataLoaderWithVault(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
self._loader.set_vault_password('ansible')
def tearDown(self):
pass
@patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
def test_parse_from_vault_1_1_file(self):
vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
if PY3:
builtins_name = 'builtins'
else:
builtins_name = '__builtin__'
with patch(builtins_name + '.open', mock_open(read_data=vaulted_data.encode('utf-8'))):
output = self._loader.load_from_file('dummy_vault.txt')
self.assertEqual(output, dict(foo='bar'))
| gpl-3.0
mechacoin/mechacoin | qa/rpc-tests/python-bitcoinrpc/bitcoinrpc/authproxy.py | 305 | 5784 |
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
Exception.__init__(self)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return round(o, 8)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
self.__service_url = service_url
self.__service_name = service_name
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
None, None, False,
timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
False, timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self.__service_name is not None:
name = "%s.%s" % (self.__service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def __call__(self, *args):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self.__service_name,
json.dumps(args, default=EncodeDecimal)))
postdata = json.dumps({'version': '1.1',
'method': self.__service_name,
'params': args,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
response = self._get_response()
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal)
log.debug("--> "+postdata)
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
return self._get_response()
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal)))
else:
log.debug("<-- "+responsedata)
return response
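# Usage sketch (not part of the upstream file; URL and credentials are invented):
#
#   proxy = AuthServiceProxy("http://rpcuser:rpcpass@localhost:8332")
#   count = proxy.getblockcount()                  # attribute access builds the RPC method name
#   block = proxy.getblock(proxy.getbestblockhash())
#
# Because __getattr__ returns a new AuthServiceProxy bound to the requested name and
# reusing the same connection, dotted service names such as proxy.ns.method() also work.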
| mit
ah744/ScaffCC_RKQC | rkqc/tools/gui/items/EquivalenceCheckItem.py | 3 | 3266 |
# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import Qt, QSize
from PyQt4.QtGui import QBrush, QTableWidgetItem
from revkit import equivalence_check
from core.BaseItem import *
from ui.DesignerWidget import DesignerWidget
from ui.EquivalenceCheck import Ui_EquivalenceCheck
class EquivalenceCheck( DesignerWidget ):
def __init__( self, parent = None ):
DesignerWidget.__init__( self, Ui_EquivalenceCheck, parent )
def clear( self ):
self.tableWidget.clearContents()
self.tableWidget.setRowCount( 0 )
def addRow( self, circ1, circ2, equivalent ):
self.tableWidget.setRowCount( self.tableWidget.rowCount() + 1 )
row = self.tableWidget.rowCount() - 1
self.tableWidget.setItem( row, 0, QTableWidgetItem( circ1.circuit_name ) )
self.tableWidget.setItem( row, 1, QTableWidgetItem( circ2.circuit_name ) )
self.tableWidget.setItem( row, 2, QTableWidgetItem( str( equivalent ) ) )
for column in range( 3 ):
self.tableWidget.item( row, column ).setFlags( Qt.ItemIsSelectable | Qt.ItemIsEnabled )
self.tableWidget.item( row, 2 ).setForeground( QBrush( Qt.green if equivalent else Qt.red ) )
self.tableWidget.item( row, 2 ).setTextAlignment( Qt.AlignVCenter | Qt.AlignHCenter )
@item( "Equivalence Checking",
iconname = "checkbox",
requires = [ "Circuit", "Circuit" ],
widget = { 'class': EquivalenceCheck, 'size': (500, 400) } )
class EquivalenceCheckItem( BaseItem ):
"""This items provide the SAT-based equivalence checker. It gets two circuits and returns <i>equivalent</i> if both circuits realizing the same function. The equivalence checker supports different configurations of constant inputs and garbage outputs in the considered circuits. If more than one benchmark, e.g. when using <i>Path Benchmarks</i>, a detailed overview is given when enlarging the item."""
def onCreate( self ):
self.setText( "Equivalence Checking" )
self.setState( self.CONFIGURED )
def initialize( self ):
self.equivalent = True
self.widget.clear()
def executeEvent( self, inputs ):
r = equivalence_check( inputs[0], inputs[1] )
if type( r ) == dict:
self.widget.addRow( inputs[0], inputs[1], r['equivalent'] )
self.equivalent = self.equivalent and r['equivalent']
self.setText( "Equivalent" if self.equivalent else "Not equivalent" )
else:
return r
return []
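# Note (inferred from the decorator and code above, not from RevKit documentation):
# the item framework supplies the two required "Circuit" inputs as inputs[0] and
# inputs[1]; equivalence_check() returns a dict with an 'equivalent' key on success,
# and any other return value is passed through as an error result.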
| bsd-2-clause
fidomason/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/colorama/initialise.py | 484 | 1297 |
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = sys.stdout
orig_stderr = sys.stderr
wrapped_stdout = sys.stdout
wrapped_stderr = sys.stderr
atexit_done = False
def reset_all():
AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
if not wrap and any([autoreset, convert, strip]):
raise ValueError('wrap=False conflicts with any other arg=True')
global wrapped_stdout, wrapped_stderr
sys.stdout = wrapped_stdout = \
wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
sys.stderr = wrapped_stderr = \
wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
global atexit_done
if not atexit_done:
atexit.register(reset_all)
atexit_done = True
def deinit():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def reinit():
sys.stdout = wrapped_stdout
sys.stderr = wrapped_stderr
def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrap:
wrapper = AnsiToWin32(stream,
convert=convert, strip=strip, autoreset=autoreset)
if wrapper.should_wrap():
stream = wrapper.stream
return stream
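# Usage sketch (not part of the upstream file), showing the intended call sequence:
#
#   from colorama import init, deinit, Fore
#   init(autoreset=True)        # wraps sys.stdout/sys.stderr via wrap_stream()
#   print(Fore.RED + 'error')   # autoreset emits a reset code after each write
#   deinit()                    # restores the original streams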
| lgpl-3.0
benrudolph/commcare-hq | corehq/apps/hqpillow_retry/filters.py | 1 | 1993 |
from collections import defaultdict
from django.db.models.aggregates import Count
from corehq.apps.reports.filters.base import BaseSingleOptionFilter, CheckboxFilter, BaseDrilldownOptionFilter
from django.utils.translation import ugettext_noop as _
from pillow_retry.models import PillowError
class PillowErrorFilter(BaseDrilldownOptionFilter):
slug = 'pillow_error'
label = _('Filter errors')
@property
def drilldown_map(self):
def err_item(val, name, val_count, next_list=None):
ret = {
'val': val,
'text': '{} ({})'.format(name, val_count)
}
if next_list:
ret['next'] = next_list
return ret
data = PillowError.objects.values('pillow', 'error_type').annotate(num_errors=Count('id'))
data_map = defaultdict(list)
pillow_counts = defaultdict(lambda: 0)
for row in data:
pillow = row['pillow']
error = row['error_type']
count = row['num_errors']
data_map[pillow].append(err_item(error, error, count))
pillow_counts[pillow] += count
return [
err_item(pillow, pillow.split('.')[-1], pillow_counts[pillow], errors)
for pillow, errors in data_map.items()
]
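# Shape of the value returned above (sketch; the pillow path and counts are invented):
# [
#     {'val': 'corehq.pillows.case.CasePillow', 'text': 'CasePillow (12)',
#      'next': [{'val': 'KeyError', 'text': 'KeyError (12)'}]},
# ]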
@classmethod
def get_labels(cls):
return [
(_('Pillow Class'), 'Select pillow...', 'pillow'),
(_("Error Type"), 'Select error...', 'error'),
]
class DatePropFilter(BaseSingleOptionFilter):
slug = 'date_prop'
label = _("Filter by")
default_text = _("Filter date by ...")
@property
def options(self):
return [
('date_created', 'Date Created'),
('date_last_attempt', 'Date of Last Attempt'),
('date_next_attempt', 'Date of Next Attempt'),
]
class AttemptsFilter(CheckboxFilter):
slug = 'filter_attempts'
label = _("Show only records with max attempts")
| bsd-3-clause
tumbl3w33d/ansible | lib/ansible/modules/network/ios/ios_ping.py | 18 | 5820 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ios_ping
short_description: Tests reachability using ping from Cisco IOS network devices
description:
- Tests reachability using ping from switch to a remote destination.
- For a general purpose network module, see the M(net_ping) module.
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
extends_documentation_fragment: ios
options:
count:
description:
- Number of packets to send.
default: 5
dest:
description:
- The IP Address or hostname (resolvable by switch) of the remote node.
required: true
source:
description:
- The source IP Address.
state:
description:
- Determines if the expected result is success or fail.
choices: [ absent, present ]
default: present
vrf:
description:
- The VRF to use for forwarding.
default: default
notes:
- For a general purpose network module, see the M(net_ping) module.
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
'''
EXAMPLES = r'''
- name: Test reachability to 10.10.10.10 using default vrf
ios_ping:
dest: 10.10.10.10
- name: Test reachability to 10.20.20.20 using prod vrf
ios_ping:
dest: 10.20.20.20
vrf: prod
- name: Test unreachability to 10.30.30.30 using default vrf
ios_ping:
dest: 10.30.30.30
state: absent
- name: Test reachability to 10.40.40.40 using prod vrf and setting count and source
ios_ping:
dest: 10.40.40.40
source: loopback0
vrf: prod
count: 20
'''
RETURN = '''
commands:
description: Show the command sent.
returned: always
type: list
sample: ["ping vrf prod 10.40.40.40 count 20 source loopback0"]
packet_loss:
description: Percentage of packets lost.
returned: always
type: str
sample: "0%"
packets_rx:
description: Packets successfully received.
returned: always
type: int
sample: 20
packets_tx:
description: Packets successfully transmitted.
returned: always
type: int
sample: 20
rtt:
description: Show RTT stats.
returned: always
type: dict
sample: {"avg": 2, "max": 8, "min": 1}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.ios import run_commands
from ansible.module_utils.network.ios.ios import ios_argument_spec
import re
def main():
""" main entry point for module execution
"""
argument_spec = dict(
count=dict(type="int"),
dest=dict(type="str", required=True),
source=dict(type="str"),
state=dict(type="str", choices=["absent", "present"], default="present"),
vrf=dict(type="str")
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec)
count = module.params["count"]
dest = module.params["dest"]
source = module.params["source"]
vrf = module.params["vrf"]
warnings = list()
results = {}
if warnings:
results["warnings"] = warnings
results["commands"] = [build_ping(dest, count, source, vrf)]
ping_results = run_commands(module, commands=results["commands"])
ping_results_list = ping_results[0].split("\n")
stats = ""
for line in ping_results_list:
if line.startswith('Success'):
stats = line
success, rx, tx, rtt = parse_ping(stats)
loss = abs(100 - int(success))
results["packet_loss"] = str(loss) + "%"
results["packets_rx"] = int(rx)
results["packets_tx"] = int(tx)
# Convert rtt values to int
for k, v in rtt.items():
if rtt[k] is not None:
rtt[k] = int(v)
results["rtt"] = rtt
validate_results(module, loss, results)
module.exit_json(**results)
def build_ping(dest, count=None, source=None, vrf=None):
"""
Function to build the command to send to the terminal for the switch
to execute. All args come from the module's unique params.
"""
if vrf is not None:
cmd = "ping vrf {0} {1}".format(vrf, dest)
else:
cmd = "ping {0}".format(dest)
if count is not None:
cmd += " repeat {0}".format(str(count))
if source is not None:
cmd += " source {0}".format(source)
return cmd
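# Sketch of the command produced for the last task in EXAMPLES above:
#   build_ping("10.40.40.40", count=20, source="loopback0", vrf="prod")
#   -> "ping vrf prod 10.40.40.40 repeat 20 source loopback0"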
def parse_ping(ping_stats):
"""
Function used to parse the statistical information from the ping response.
Example: "Success rate is 100 percent (5/5), round-trip min/avg/max = 1/2/8 ms"
Returns the percent of packet loss, received packets, transmitted packets, and RTT dict.
"""
rate_re = re.compile(r"^\w+\s+\w+\s+\w+\s+(?P<pct>\d+)\s+\w+\s+\((?P<rx>\d+)/(?P<tx>\d+)\)")
rtt_re = re.compile(r".*,\s+\S+\s+\S+\s+=\s+(?P<min>\d+)/(?P<avg>\d+)/(?P<max>\d+)\s+\w+\s*$|.*\s*$")
rate = rate_re.match(ping_stats)
rtt = rtt_re.match(ping_stats)
return rate.group("pct"), rate.group("rx"), rate.group("tx"), rtt.groupdict()
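# Sketch of parse_ping() applied to the example line from its docstring:
#   parse_ping("Success rate is 100 percent (5/5), round-trip min/avg/max = 1/2/8 ms")
#   -> ('100', '5', '5', {'min': '1', 'avg': '2', 'max': '8'})
# All values are strings at this point; main() converts the RTT values to int.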
def validate_results(module, loss, results):
"""
This function is used to validate whether the ping results were unexpected per "state" param.
"""
state = module.params["state"]
if state == "present" and loss == 100:
module.fail_json(msg="Ping failed unexpectedly", **results)
elif state == "absent" and loss < 100:
module.fail_json(msg="Ping succeeded unexpectedly", **results)
if __name__ == "__main__":
main()
| gpl-3.0
xzregg/yunwei | yw/views/fileobj.py | 2 | 8961 |
# coding:utf-8
from ..Global import *
import os
import sys
import glob
import re
import json
import time
from ..models import fileobj, hosts
from .control import GetCmdRet
def GetFileObjPath(name, ishost=True):
#from ..Global import FileObjectsDir
obj = 'hosts' if ishost else 'groups'
path = os.path.join(FileObjectsDir, obj, name)
if not os.path.isdir(path):
os.mkdir(path)
return path
def Iptables(request):
'''
Firewall settings
'''
gid, group, hid, ip = GetPost(request, ['gid', 'group', 'hid', 'ip'])
isgroup, unitsid, mark = (
False, hid, ip) if hid and ip else (True, gid, group)
filename = u'Iptables'
rpath = '/etc/iptables.sh'
# Check whether a record already exists; create one if not
o, create = fileobj.objects.get_or_create(
unitsid=unitsid, filename=filename)
if create: # newly created
if hid:
edittype = False
bcmd = ''
acmd = 'iptables -nvL'
port = hosts.GetHost('id', hid).port
else: # host groups default to append mode
port = 22
edittype = True
bcmd = u"sed -i '/^#<yw_%s>/,/^#<\/yw_%s>/d' %s" % (mark,
mark, rpath)
acmd = u"iptables -nvL && sed -n '/^#<yw_%s>/,/^#<\/yw_%s>/p' %s" % (mark,
mark, rpath)
o.isgroup, o.rpath, o.edittype, o.bcmd, o.acmd = isgroup, rpath, edittype, bcmd, acmd
# default firewall script
o.text = u'''
#!/bin/sh
/sbin/modprobe ip_tables
/sbin/modprobe iptable_filter
/sbin/modprobe iptable_nat
/sbin/modprobe ip_conntrack
/sbin/modprobe ip_conntrack_ftp
#flush
iptables -F;iptables -X;iptables -t nat -F
iptables -P OUTPUT ACCEPT
iptables -N firewall
iptables -A INPUT -j firewall
iptables -A FORWARD -j firewall
# allow access to the outside (established/related traffic)
iptables -A firewall -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A firewall -i lo -j ACCEPT
#ssh
#iptables -A firewall -p tcp --dport %s -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
''' % port
o.save()
NewRequest = Myrequest(request)
rP = NewRequest.POST
rP['fid'] = str(o.id)
rP['rpath'] = o.rpath
return FileOBJ(NewRequest)
def Cron(request):
'''
Scheduled tasks (crontab)
'''
gid, group, hid, ip = GetPost(request, ['gid', 'group', 'hid', 'ip'])
isgroup, unitsid, mark = (
False, hid, ip) if hid and ip else (True, gid, group)
filename = u'Cron'
rpath = '/tmp/yw_%s_cron' % mark
# Check whether a cron record already exists; create one if not
o, create = fileobj.objects.get_or_create(
unitsid=unitsid, filename=filename)
if create: # newly created
if hid:
edittype = False
Gbcmd = ''
Gacmd = ''
else: # host groups default to append mode
edittype = True
Gbcmd = u"&& sed -i '/^#<yw_%s>/,/^#<\/yw_%s>/d' %s" % (mark,
mark, rpath)
Gacmd = u''
# Gacmd=u"&& sed -n '/^#<yw_%s>/,/^#<\/yw_%s>/p'
# %s"%(mark,mark,rpath)
bcmd = 'crontab -l > %s ' % rpath + Gbcmd
acmd = 'crontab %s && crontab -l ' % rpath + Gacmd
o.isgroup, o.rpath, o.edittype, o.bcmd, o.acmd = isgroup, rpath, edittype, bcmd, acmd
o.save()
NewRequest = Myrequest(request)
rP = NewRequest.POST
rP['fid'] = str(o.id)
rP['rpath'] = o.rpath
return FileOBJ(NewRequest)
def FileOBJ(request):
'''
File object
'''
stime = time.time()
Random = int(time.time() * 100)
hid, ip, gid, group, HostsId, fid, action, filename, rpath, edittype, FileObjText, AfterCMD, BeforeCMD = GetPost(
request,
['hid', 'ip', 'gid', 'group', 'checkbox', 'fid', 'action', 'filename', 'rpath', 'edittype', 'FileObjText', 'AfterCMD', 'BeforeCMD'], [4])
if fid:
o = fileobj.objects.get(id=fid)
HostsId = [x.split('__')[-1]
for x in HostsId if x] if not hid else [hid]
mark = ip or group
if action == "Del":
o.delete()
return HttpResponse(u'%s[%s]删除成功!' % (o.filename, o.rpath))
elif action == "Modify" and request.method == "POST": # 保存到数据库
edittype = True if edittype == "append" else False
o.filename = filename
o.edittype = edittype
o.rpath = rpath
o.text = FileObjText.encode('utf-8')
o.bcmd = BeforeCMD
o.acmd = AfterCMD
o.save()
return HttpResponse(u'[%s]保存成功!' % filename)
elif action == "Sync" and request.method == "POST": # 写文件
FileObjText = FileObjText.replace('\r\n', '\n')
if edittype == "append":
FileObjText = u'\n#<yw_%s>\n' % mark + \
FileObjText + u'\n#</yw_%s>' % mark
return HttpResponse(GetCmdRet('FileOBJ', HostsId, group, 0, 30, (edittype, rpath, FileObjText.encode('utf-8'), BeforeCMD, AfterCMD)))
else: # read
EditType = 1 if o.edittype else 0
isEdit = True
if hid:
Ho = hosts.GetHost('id', hid)
HostObj = Host(Ho.ip, Ho.port, Ho.user,
Ho.password, Ho.sshtype, 10)
# for a single host, read the remote file directly
c, FileObjText, t = HostObj.FileOBJ(
'read', o.rpath, '', o.bcmd)
etime = time.time()
UseTime = u'读用时: %.1f 秒' % (etime - stime)
else:
FileObjText = o.text
Urlopt = u'?hid=%s&ip=%s' % (
hid, ip) if hid and ip else u'?gid=%s&group=%s' % (gid, group)
button = [
[u'保存本地', MakeURL(FileOBJ) + Urlopt + '&action=Modify&fid=%s&rpath=%s' %
(fid, rpath), 'load', u'保存到数据!', 'center', 3],
[u'同步远程', MakeURL(FileOBJ) + Urlopt + '&action=Sync&fid=%s&rpath=%s' %
(fid, rpath), 'load', u'同步对端!', 'center']
]
return render_to_response('AddFileOBJ.html', locals())
def AddFileOBJ(request):
'''
Add a file object
'''
Random = int(time.time() * 100)
hid, ip, gid, group, filename, rpath, edittype = GetPost(request,
['hid', 'ip', 'gid', 'group', 'filename', 'rpath', 'edittype'])
if ((hid and ip) or (gid and group)) and request.method == "POST" and filename and rpath and edittype:
isgroup, unitsid, mark = (
False, hid, ip) if hid and ip else (True, gid, group)
if edittype == "append":
edittype = True
bcmd = "sed -i '/^#<yw_%s>/,/^#<\/yw_%s>/d' %s" % (mark,
mark, rpath)
acmd = "sed -n '/^#<yw_%s>/,/^#<\/yw_%s>/p' %s" % (mark,
mark, rpath)
else:
edittype, bcmd, acmd = (False, '', '')
if fileobj.objects.filter(unitsid=unitsid, filename=filename):
return HttpResponse(u'[%s]已存在!' % filename)
o = fileobj.objects.create(isgroup=isgroup, unitsid=unitsid,
filename=filename, edittype=edittype, rpath=rpath, bcmd=bcmd, acmd=acmd)
NewRequest = Myrequest()
rP = NewRequest.POST
rP['hid'], rP['ip'], rP['gid'], rP['group'] = hid, ip, gid, group
rP['fid'] = str(o.id)
rP['rpath'] = o.rpath
return FileOBJ(NewRequest)
Urlopt = u'?hid=%s&ip=%s' % (
hid, ip) if hid and ip else u'?gid=%s&group=%s' % (gid, group)
button = [[u'增加', MakeURL(AddFileOBJ) + Urlopt, 'load'],
]
return render_to_response('AddFileOBJ.html', locals())
def ShowFileOBJ(request):
'''
Host file objects
'''
Random = int(time.time() * 100)
gid, group, hid, ip = GetPost(request, ['gid', 'group', 'hid', 'ip'])
unitsid = gid or hid
Urlopt = u'?hid=%s&ip=%s' % (
hid, ip) if hid and ip else u'?gid=%s&group=%s' % (gid, group)
FileObjs = []
for o in fileobj.objects.filter(unitsid=unitsid):
FileObjs.append(
MakeAjaxButton(
'a', MakeURL(FileOBJ) + Urlopt + '&fid=%s&rpath=%s' %
(o.id, o.rpath), o.filename, 'load', Random, '', 'center'))
FileObjs.append(
MakeAjaxButton(
'a', MakeURL(FileOBJ) + Urlopt + '&fid=%s&rpath=%s&action=Del' %
(o.id, o.rpath), u'删除', 'load', Random, u'删除', 'center', 3))
T = MakeTable(['', ''], FileObjs)
button = [[u'增加文件对象', MakeURL(AddFileOBJ) + Urlopt, 'load'],
]
return render_to_response('ShowFileOBJ.html', locals())
| lgpl-3.0
inonit/django-chemtrails | tests/contrib/permissions/test_utils.py | 1 | 33686 |
# -*- coding: utf-8 -*-
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models import QuerySet
from django.test import TestCase
from chemtrails.contrib.permissions import utils
from chemtrails.contrib.permissions.exceptions import MixedContentTypeError
from chemtrails.contrib.permissions.models import AccessRule
from chemtrails.neoutils import get_node_for_object, get_nodeset_for_queryset
from tests.testapp.autofixtures import Author, AuthorFixture, Book, BookFixture, Store, StoreFixture
from tests.utils import flush_nodes, clear_neo4j_model_nodes
User = get_user_model()
class GetIdentityTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.get_identity()``.
"""
def test_get_identity_anonymous_user(self):
user = AnonymousUser()
try:
utils.get_identity(user)
self.fail('Did not raise NotImplementedError when checking with AnonymousUser.')
except NotImplementedError as e:
self.assertEqual(str(e), 'Implement support for AnonymousUser, please!')
@flush_nodes()
def test_get_identity_user(self):
user = User.objects.create_user(username='testuser', password='test123.')
self.assertEqual(utils.get_identity(user), (user, None))
@flush_nodes()
def test_get_identity_group(self):
group = Group.objects.create(name='mygroup')
self.assertEqual(utils.get_identity(group), (None, group))
class GetContentTypeTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.get_content_type()``.
"""
def test_get_content_type_from_class(self):
self.assertEqual(utils.get_content_type(User),
ContentType.objects.get_for_model(User))
@flush_nodes()
def test_get_content_type_from_instance(self):
user = User.objects.create_user(username='testuser', password='test123.')
self.assertEqual(utils.get_content_type(user),
ContentType.objects.get_for_model(User))
class CheckPermissionsAppLabelTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.check_permissions_app_label()``.
"""
@flush_nodes()
def test_check_permissions_app_label_single(self):
perm = 'testapp.add_book'
book = BookFixture(Book).create_one()
self.assertEqual(utils.check_permissions_app_label(perm),
(utils.get_content_type(book), {'add_book'}))
self.assertEqual(utils.check_permissions_app_label(perm),
(utils.get_content_type(Book), {'add_book'}))
@flush_nodes()
def test_check_permissions_app_label_invalid_fails(self):
perm = 'testapp.invalid_permission'
self.assertRaisesMessage(
ContentType.DoesNotExist, 'ContentType matching query does not exist.',
utils.check_permissions_app_label, permissions=perm)
@flush_nodes()
def test_check_permissions_app_label_sequence(self):
perms = ['testapp.add_book', 'testapp.change_book']
book = BookFixture(Book).create_one()
ctype, codenames = utils.check_permissions_app_label(perms)
self.assertEqual(ctype, utils.get_content_type(book))
self.assertEqual(sorted(codenames), ['add_book', 'change_book'])
def test_check_permissions_app_label_sequence_fails(self):
perms = ['testapp.add_book', 'auth.add_user']
self.assertRaisesMessage(
MixedContentTypeError, ('Given permissions must have the same app label '
'(testapp != auth).'),
utils.check_permissions_app_label, permissions=perms)
perms = ['testapp.add_book', 'testapp.add_store']
self.assertRaisesMessage(
MixedContentTypeError, ('Calculated content type from permission "testapp.add_store" '
'store does not match <ContentType: book>.'),
utils.check_permissions_app_label, permissions=perms)
class GetUsersWithPermsTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.get_users_with_perms()``.
"""
def setUp(self):
clear_neo4j_model_nodes()
self.book = BookFixture(Book, generate_m2m={'authors': (2, 2)}).create_one()
self.user1, self.user2 = User.objects.earliest('pk'), User.objects.latest('pk')
self.group = Group.objects.create(name='group')
def tearDown(self):
clear_neo4j_model_nodes()
def test_invalid_single_perm(self):
perms = ['add_user']
self.assertRaisesMessage(
MixedContentTypeError, ('Calculated content type from permission "add_user" '
'does not match <ContentType: book>.'),
utils.get_users_with_perms, obj=self.book, permissions=perms
)
def test_invalid_multiple_perms(self):
perms = ['add_user', 'view_book']
self.assertRaisesMessage(
MixedContentTypeError, ('One or more permissions "add_user, view_book" from calculated '
'content type does not match <ContentType: book>.'),
utils.get_users_with_perms, obj=self.book, permissions=perms
)
def test_no_perms(self):
queryset = utils.get_users_with_perms(self.book, permissions=[])
self.assertEqual(set(queryset), set())
def test_no_perms_with_superusers(self):
self.user1.is_superuser = True
self.user1.save()
queryset = utils.get_users_with_perms(self.book, permissions=[], with_superusers=True)
self.assertEqual(set(queryset), {self.user1})
def test_single_perm(self):
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Book),
relation_types=[{'AUTHOR': None},
{'BOOK': None}])
perm = Permission.objects.get(content_type__app_label='testapp', codename='view_book')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
users = utils.get_users_with_perms(obj=self.book, permissions='view_book')
self.assertEqual(set(users), {self.user1})
def test_multiple_perms(self):
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Book),
relation_types=[{'AUTHOR': None},
{'BOOK': None}])
perms = Permission.objects.filter(content_type__app_label='testapp', codename__in=['view_book', 'change_book'])
access_rule.permissions.add(*perms)
self.user1.user_permissions.add(*perms)
self.user2.user_permissions.add(perms.get(codename='change_book')) # Should not be in result set
users = utils.get_users_with_perms(obj=self.book, permissions=['change_book', 'view_book'])
self.assertEqual(set(users), {self.user1})
def test_get_relation_types_definition_index_variable(self):
book = BookFixture(Book, generate_m2m={'authors': (2, 2)}).create_one()
get_nodeset_for_queryset(Store.objects.filter(pk=book.pk), sync=True)
user = User.objects.filter(pk__in=book.authors.values('user')).latest('pk')
perm = Permission.objects.get(content_type__app_label='auth', codename='change_user')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(User),
relation_types=[
{'AUTHOR': None},
{'BOOK': None},
{'{0:AUTHORS}': {'pk': '{source}.pk'}}, # '{source}.pk' will be ignored
{'USER': None}
])
access_rule.permissions.add(perm)
user.user_permissions.add(perm)
queryset = utils.get_users_with_perms(user, 'auth.change_user')
self.assertEqual({user}, set(queryset))
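# Note on the fixture pattern used throughout these tests (inferred from the tests
# themselves, not from chemtrails documentation): an AccessRule describes a path
# through the graph from a source content type to a target content type as an
# ordered list of relation types, for example
#
#   AccessRule.objects.create(ctype_source=utils.get_content_type(User),
#                             ctype_target=utils.get_content_type(Book),
#                             relation_types=[{'AUTHOR': None}, {'BOOK': None}])
#
# Each dict maps a relationship name to optional node property filters; '{source}.attr'
# and '{0:RELATION}' placeholders are resolved when the rule is evaluated.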
class GetObjectsForUserTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.get_objects_for_user()``.
"""
def setUp(self):
clear_neo4j_model_nodes()
BookFixture(Book, generate_m2m={'authors': (2, 2)}).create_one()
self.user1, self.user2 = User.objects.earliest('pk'), User.objects.latest('pk')
self.group = Group.objects.create(name='group')
def tearDown(self):
clear_neo4j_model_nodes()
def test_superuser(self):
self.user1.is_superuser = True
queryset = Book.objects.all()
objects = utils.get_objects_for_user(self.user1, ['testapp.change_book'], queryset)
self.assertEqual(set(queryset), set(objects))
def test_with_superuser_true(self):
self.user1.is_superuser = True
queryset = Book.objects.all()
objects = utils.get_objects_for_user(self.user1,
['testapp.change_book'], queryset, with_superuser=True)
self.assertEqual(set(queryset), set(objects))
def test_with_superuser_false(self):
BookFixture(Book, follow_fk=True, generate_m2m={'authors': (1, 1)}).create(count=2)
user = User.objects.latest('pk')
user.is_superuser = True
# `with_superuser=False` requires defined access rules - should yield no results!
self.assertEqual(set(Book.objects.none()),
set(utils.get_objects_for_user(
user, ['testapp.change_book'], Book.objects.all(), with_superuser=False)))
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(user),
ctype_target=utils.get_content_type(Book),
relation_types=[
{'AUTHOR': None},
{'BOOK': None}
])
perm = Permission.objects.get(content_type__app_label='testapp', codename='change_book')
access_rule.permissions.add(perm)
user.user_permissions.add(perm)
objects = utils.get_objects_for_user(user,
['testapp.change_book'], Book.objects.all(), with_superuser=False)
self.assertEqual(set(user.author.book_set.all()), set(objects))
def test_anonymous(self):
user = AnonymousUser()
queryset = Book.objects.all()
objects = utils.get_objects_for_user(user,
['testapp.change_book'], queryset)
self.assertEqual(set(Book.objects.none()), set(objects))
def test_nonexistent_source_node(self):
user = User.objects.create_user(username='testuser')
node = get_node_for_object(user).sync()
node.delete()
objects = utils.get_objects_for_user(user, ['testapp.add_book'])
self.assertEqual(set(objects), set(Book.objects.none()))
def test_mixed_permissions(self):
codenames = [
'testapp.change_book',
'testapp.change_store'
]
self.assertRaises(MixedContentTypeError, utils.get_objects_for_user, self.user1, codenames)
def test_mixed_app_label_permissions(self):
codenames = [
'testapp.change_book',
'auth.change_user'
]
self.assertRaises(MixedContentTypeError, utils.get_objects_for_user, self.user1, codenames)
def test_mixed_ctypes_no_klass(self):
codenames = [
'testapp.change_book',
'auth.change_user'
]
self.assertRaises(MixedContentTypeError, utils.get_objects_for_user, self.user1, codenames)
def test_mixed_ctypes_with_klass(self):
codenames = [
'testapp.change_book',
'auth.change_user'
]
self.assertRaises(MixedContentTypeError, utils.get_objects_for_user, self.user1, codenames, Book)
def test_no_app_label_or_klass(self):
self.assertRaises(ValueError, utils.get_objects_for_user, self.user1, ['change_book'])
def test_empty_permissions_sequence(self):
objects = utils.get_objects_for_user(self.user1, [], Book.objects.all())
self.assertEqual(set(objects), set())
def test_permissions_single(self):
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
perm = Permission.objects.get(content_type__app_label='auth', codename='change_group')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
self.user1.groups.add(self.group)
self.assertEqual(
set(self.user1.groups.all()),
set(utils.get_objects_for_user(self.user1, 'auth.change_group'))
)
self.assertEqual(
set(self.user1.groups.all()),
set(utils.get_objects_for_user(self.user1, ['auth.change_group']))
)
def test_klass_as_model(self):
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
perm = Permission.objects.get(content_type__app_label='auth', codename='change_group')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
self.user1.groups.add(self.group)
objects = utils.get_objects_for_user(self.user1,
['auth.change_group'], Group)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_klass_as_manager(self):
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
perm = Permission.objects.get(content_type__app_label='auth', codename='change_group')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
self.user1.groups.add(self.group)
objects = utils.get_objects_for_user(self.user1,
['auth.change_group'], Group.objects)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_klass_as_queryset(self):
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
perm = Permission.objects.get(content_type__app_label='auth', codename='change_group')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
self.user1.groups.add(self.group)
objects = utils.get_objects_for_user(self.user1,
['auth.change_group'], Group.objects.all())
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_ensure_returns_queryset(self):
objects = utils.get_objects_for_user(self.user1, ['auth.change_group'])
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(objects.model, Group)
def test_single_permission_to_check(self):
groups = Group.objects.bulk_create([Group(name=name) for name in ['group1', 'group2', 'group3']])
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
perm = Permission.objects.get(content_type__app_label='auth', codename='change_group')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
self.user1.groups.add(*groups)
objects = utils.get_objects_for_user(self.user1, 'auth.change_group')
self.assertEqual(len(groups), len(objects))
self.assertEqual(set(groups), set(objects))
def test_multiple_permissions_to_check(self):
groups = Group.objects.bulk_create([Group(name=name) for name in ['group1', 'group2', 'group3']])
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
add_perm = Permission.objects.get(content_type__app_label='auth', codename='add_group')
change_perm = Permission.objects.get(content_type__app_label='auth', codename='change_group')
access_rule.permissions.add(*[add_perm, change_perm])
self.user1.user_permissions.add(*[add_perm, change_perm])
self.user1.groups.add(*groups)
objects = utils.get_objects_for_user(self.user1, ['auth.add_group', 'auth.change_group'])
self.assertEqual(len(groups), len(objects))
self.assertEqual(set(groups), set(objects))
def test_multiple_permissions_to_check_requires_staff(self):
groups = Group.objects.bulk_create([Group(name=name) for name in ['group1', 'group2', 'group3']])
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
requires_staff=True,
relation_types=[{'GROUPS': None}])
perms = Permission.objects.filter(content_type__app_label='auth', codename__in=['add_group', 'delete_group'])
access_rule.permissions.add(*perms)
self.user1.user_permissions.add(*perms)
self.user1.groups.add(*groups)
self.user1.is_staff = True
get_node_for_object(self.user1).sync() # Sync node in order to write `is_staff` property
objects = utils.get_objects_for_user(self.user1, ['auth.add_group', 'auth.delete_group'])
self.assertEqual(set(groups), set(objects))
self.user2.user_permissions.add(*perms)
self.user2.groups.add(*groups)
self.assertFalse(self.user2.is_staff)
objects = utils.get_objects_for_user(self.user2, ['auth.add_group', 'auth.delete_group'])
self.assertEqual(set(), set(objects))
def test_multiple_permissions_to_check_use_groups(self):
self.group.permissions.add(Permission.objects.get(content_type__app_label='auth', codename='add_group'))
self.user1.user_permissions.add(Permission.objects.get(content_type__app_label='auth', codename='change_group'))
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
access_rule.permissions.add(*Permission.objects.filter(content_type__app_label='auth',
codename__in=['add_group', 'change_group']))
self.user1.groups.add(self.group)
objects = utils.get_objects_for_user(self.user1,
['auth.add_group', 'auth.change_group'], use_groups=True)
self.assertEqual(set(self.user1.groups.all()), set(objects))
self.user1.groups.remove(self.group)
objects = utils.get_objects_for_user(self.user1,
['auth.add_group', 'auth.change_group'], use_groups=False)
self.assertEqual(set(), set(objects))
def test_extra_perms_single(self):
group = Group.objects.create(name='a group')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
access_rule.permissions.add(Permission.objects.get(content_type__app_label='auth', codename='add_group'))
self.user1.groups.add(group)
objects = utils.get_objects_for_user(self.user1, 'auth.add_group')
self.assertEqual(set(), set(objects))
objects = utils.get_objects_for_user(self.user1, 'auth.add_group', extra_perms='auth.add_group')
self.assertEqual({group}, set(objects))
def test_extra_perms_sequence(self):
group = Group.objects.create(name='a group')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
access_rule.permissions.add(*Permission.objects.filter(content_type__app_label='auth',
codename__in=['add_group', 'change_group']))
self.user1.groups.add(group)
objects = utils.get_objects_for_user(self.user1, 'auth.add_group')
self.assertEqual(set(), set(objects))
objects = utils.get_objects_for_user(self.user1, 'auth.add_group',
extra_perms=['auth.add_group', 'auth.change_group'])
self.assertEqual({group}, set(objects))
def test_extra_perms_single_mixed_ctype(self):
self.assertRaises(MixedContentTypeError, utils.get_objects_for_user,
self.user1, 'auth.add_user', extra_perms='testapp.change_store')
def test_extra_perms_sequence_mixed_ctype(self):
codenames = [
'testapp.change_book',
'testapp.change_store'
]
self.assertRaises(MixedContentTypeError, utils.get_objects_for_user,
self.user1, 'auth.add_user', extra_perms=codenames)
def test_any_permissions(self):
groups = Group.objects.bulk_create([Group(name=name) for name in ['group1', 'group2', 'group3']])
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': None}])
perms = Permission.objects.filter(content_type__app_label='auth', codename__in=['add_group', 'change_group'])
access_rule.permissions.add(*perms)
self.user1.user_permissions.add(*perms)
self.user1.groups.add(*groups)
objects = utils.get_objects_for_user(self.user1, ['auth.add_group', 'auth.delete_group'], any_perm=False)
self.assertEqual(set(), set(objects))
objects = utils.get_objects_for_user(self.user1, ['auth.add_group', 'auth.delete_group'], any_perm=True)
self.assertEqual(set(groups), set(objects))
def test_relation_types_target_props(self):
groups = Group.objects.bulk_create([Group(name=name) for name in ['group1', 'group2']])
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(Group),
relation_types=[{'GROUPS': {'name': 'group1'}}])
perm = Permission.objects.get(content_type__app_label='auth', codename='add_group')
access_rule.permissions.add(perm)
self.user1.user_permissions.add(perm)
self.user1.groups.add(*groups)
objects = utils.get_objects_for_user(self.user1, 'auth.add_group')
self.assertEqual({Group.objects.get(name='group1')}, set(objects))
def test_relation_types_definition_source_variable(self):
book = BookFixture(Book, generate_m2m={'authors': (2, 2)}).create_one()
get_nodeset_for_queryset(Store.objects.filter(pk=book.pk), sync=True)
user = User.objects.filter(pk__in=book.authors.values('user')).latest('pk')
perm = Permission.objects.get(content_type__app_label='auth', codename='change_user')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(User),
relation_types=[
{'AUTHOR': None},
{'BOOK': None},
{'AUTHORS': None},
{'USER': {
'pk': '{source}.pk',
'username': '{source}.username'
}}
])
access_rule.permissions.add(perm)
user.user_permissions.add(perm)
objects = utils.get_objects_for_user(user, 'auth.change_user')
self.assertEqual({user}, set(objects))
self.assertNotEqual(User.objects.count(), objects.count())
def test_relation_types_definition_index_variable(self):
book = BookFixture(Book, generate_m2m={'authors': (2, 2)}).create_one()
get_nodeset_for_queryset(Store.objects.filter(pk=book.pk), sync=True)
user = User.objects.filter(pk__in=book.authors.values('user')).latest('pk')
perm = Permission.objects.get(content_type__app_label='auth', codename='change_user')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(User),
ctype_target=utils.get_content_type(User),
relation_types=[
{'AUTHOR': None},
{'BOOK': None},
{'{0:AUTHORS}': None},
{'USER': None}
])
access_rule.permissions.add(perm)
user.user_permissions.add(perm)
objects = utils.get_objects_for_user(user, 'auth.change_user')
self.assertEqual({user}, set(objects))
self.assertNotEqual(User.objects.count(), objects.count())
class GetObjectsForGroupTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.get_objects_for_group()``.
"""
pass
class GraphPermissionCheckerTestCase(TestCase):
"""
Testing ``chemtrails.contrib.permissions.utils.GraphPermissionChecker`` class.
"""
@flush_nodes()
def test_checker_has_perm_inactive_user(self):
user = User.objects.create_user(username='testuser', password='test123.', is_active=False)
checker = utils.GraphPermissionChecker(user)
self.assertFalse(checker.has_perm(perm=None, obj=None))
@flush_nodes()
def test_checker_has_perm_is_superuser(self):
user = User.objects.create_user(username='testuser', password='test123.', is_superuser=True)
checker = utils.GraphPermissionChecker(user)
self.assertTrue(checker.has_perm(perm=None, obj=None))
@flush_nodes()
def test_get_user_filters(self):
user = User.objects.create_user(username='testuser', password='test123.')
user.user_permissions.add(*Permission.objects.filter(codename__in=['add_user', 'change_user']))
# Get user filters for user
checker = utils.GraphPermissionChecker(user)
filters = checker.get_user_filters()
permissions = Permission.objects.filter(**filters)
self.assertIsInstance(permissions, QuerySet)
self.assertEqual(permissions.count(), 2)
@flush_nodes()
def test_get_user_perms(self):
user = User.objects.create_user(username='testuser', password='test123.')
user.user_permissions.add(*Permission.objects.filter(codename__in=['add_user', 'change_user']))
checker = utils.GraphPermissionChecker(user)
self.assertListEqual(sorted(list(checker.get_user_perms(user))), ['add_user', 'change_user'])
@flush_nodes()
def test_get_group_filters(self):
group = Group.objects.create(name='test group')
group.permissions.add(*Permission.objects.filter(codename__in=['add_user', 'change_user']))
user = User.objects.create_user(username='testuser', password='test123.')
user.groups.add(group)
# Get group filters for group
checker = utils.GraphPermissionChecker(group)
filters = checker.get_group_filters()
permissions = Permission.objects.filter(**filters)
self.assertIsInstance(permissions, QuerySet)
self.assertEqual(permissions.count(), 2)
# Get group filters for use
checker = utils.GraphPermissionChecker(user)
filters = checker.get_group_filters()
permissions = Permission.objects.filter(**filters)
self.assertIsInstance(permissions, QuerySet)
self.assertEqual(permissions.count(), 2)
@flush_nodes()
def test_get_group_perms(self):
group = Group.objects.create(name='test group')
group.permissions.add(*Permission.objects.filter(codename__in=['add_user', 'change_user']))
user = User.objects.create_user(username='testuser', password='test123.')
checker = utils.GraphPermissionChecker(group)
self.assertListEqual(sorted(list(checker.get_group_perms(user))), ['add_user', 'change_user'])
@flush_nodes()
def test_get_perms_user_is_inactive(self):
user = User.objects.create_user(username='testuser', password='test123.', is_active=False)
checker = utils.GraphPermissionChecker(user)
self.assertListEqual(checker.get_perms(user), [])
def test_get_perms_user_is_superuser(self):
user = User.objects.create_user(username='testuser', password='test123.', is_superuser=True)
checker = utils.GraphPermissionChecker(user)
self.assertListEqual(sorted(checker.get_perms(user)), ['add_user', 'change_user', 'delete_user'])
@flush_nodes()
def test_get_perms_user_in_group(self):
group = Group.objects.create(name='test group')
group.permissions.add(Permission.objects.get(codename='add_user'))
user = User.objects.create_user(username='testuser', password='test123.')
user.user_permissions.add(Permission.objects.get(codename='change_user'))
user.groups.add(group)
# Make sure we get user and group permissions combined
checker = utils.GraphPermissionChecker(user)
self.assertListEqual(sorted(checker.get_perms(user)), ['add_user', 'change_user'])
@flush_nodes()
def test_get_perms_group(self):
group = Group.objects.create(name='test group')
group.permissions.add(Permission.objects.get(codename='add_group'))
checker = utils.GraphPermissionChecker(group)
self.assertListEqual(sorted(checker.get_perms(group)), ['add_group'])
@flush_nodes()
def test_checker_has_perm_authorized_user(self):
author = AuthorFixture(Author).create_one()
user = author.user
perm = Permission.objects.get(content_type=utils.get_content_type(author), codename='change_author')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(user),
ctype_target=utils.get_content_type(author),
relation_types=[{'AUTHOR': None}])
user.user_permissions.add(perm)
access_rule.permissions.add(perm)
checker = utils.GraphPermissionChecker(user)
self.assertTrue(checker.has_perm(perm.codename, author))
@flush_nodes()
def test_checker_has_perm_authorized_group(self):
group = Group.objects.create(name='test group')
user = User.objects.create_user(username='testuser', password='test123.')
perm = Permission.objects.get(content_type=utils.get_content_type(user), codename='change_user')
access_rule = AccessRule.objects.create(ctype_source=utils.get_content_type(group),
ctype_target=utils.get_content_type(user),
relation_types=[{'USER_SET': None}])
user.groups.add(group)
group.permissions.add(perm)
access_rule.permissions.add(perm)
checker = utils.GraphPermissionChecker(group)
# self.assertTrue(checker.has_perm(perm.codename, user))
self.assertRaises(NotImplementedError, checker.has_perm, perm.codename, user)
|
mit
|
ianyh/heroku-buildpack-python-opencv
|
vendor/.heroku/lib/python2.7/lib2to3/tests/data/py2_test_grammar.py
|
285
|
30980
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
        # Backslash does not mean continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
    # Being tested at this very moment by this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
        ### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
        # *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
        # test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
        # This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
mit
|
stevekuznetsov/ansible
|
contrib/inventory/rackhd.py
|
77
|
2361
|
#!/usr/bin/env python
import json
import requests
import os
import argparse
import types
RACKHD_URL = 'http://localhost:8080'
class RackhdInventory(object):
def __init__(self, nodeids):
self._inventory = {}
for nodeid in nodeids:
self._load_inventory_data(nodeid)
inventory = {}
        for nodeid, info in self._inventory.items():
            inventory[nodeid] = self._format_output(nodeid, info)
print(json.dumps(inventory))
def _load_inventory_data(self, nodeid):
info = {}
        info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
results = {}
for key,url in info.items():
r = requests.get( url, verify=False)
results[key] = r.text
self._inventory[nodeid] = results
    def _format_output(self, nodeid, info):
        # Start with an empty group entry so a lookup failure below still returns
        # valid inventory data instead of raising UnboundLocalError on the return.
        output = {'hosts': [], 'vars': {}}
        try:
            node_info = json.loads(info['lookup'])
            ipaddress = ''
            if len(node_info) > 0:
                ipaddress = node_info[0]['ipAddress']
            output = {'hosts': [ipaddress], 'vars': {}}
            for key, result in info.items():
                output['vars'][key] = json.loads(result)
            output['vars']['ansible_ssh_user'] = 'monorail'
        except KeyError:
            pass
        return output
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
return parser.parse_args()
try:
    # Check if a RackHD URL (e.g. 10.1.1.45:8080) is specified in the environment
RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
except:
    # Fall back to the default URL defined above
pass
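# Illustrative invocation (hypothetical address): running
#   RACKHD_URL=10.1.1.45:8080 ansible -i rackhd.py all -m ping
# points this inventory script at that RackHD endpoint instead of the localhost default.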
# Use the nodeid specified in the environment to limit the data returned
# or return data for all available nodes
nodeids = []
if (parse_args().host):
try:
nodeids += parse_args().host.split(',')
RackhdInventory(nodeids)
except:
pass
if (parse_args().list):
try:
url = RACKHD_URL + '/api/common/nodes'
r = requests.get( url, verify=False)
data = json.loads(r.text)
for entry in data:
if entry['type'] == 'compute':
nodeids.append(entry['id'])
RackhdInventory(nodeids)
except:
pass
|
gpl-3.0
|
1tush/reviewboard
|
reviewboard/webapi/tests/test_file_diff_comment.py
|
10
|
5219
|
from __future__ import unicode_literals
from django.utils import six
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import filediff_comment_list_mimetype
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.urls import get_filediff_comment_list_url
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewRequestChildListMixin, BaseWebAPITestCase):
"""Testing the FileDiffCommentResource list APIs."""
fixtures = ['test_users', 'test_scmtools']
sample_api_url = \
'review-requests/<id>/diffs/<revision>/files/<id>/diff-comments/'
resource = resources.filediff_comment
def setup_review_request_child_test(self, review_request):
if not review_request.repository_id:
# The group tests don't create a repository by default.
review_request.repository = self.create_repository()
review_request.save()
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
self.create_review(review_request, publish=True)
return (get_filediff_comment_list_url(filediff),
filediff_comment_list_mimetype)
def setup_http_not_allowed_list_test(self, user):
review_request = self.create_review_request(create_repository=True,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
return get_filediff_comment_list_url(filediff)
def compare_item(self, item_rsp, filediff):
self.assertEqual(item_rsp['id'], filediff.pk)
self.assertEqual(item_rsp['text'], filediff.text)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
create_repository=True,
with_local_site=with_local_site,
submitter=user,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
if populate_items:
review = self.create_review(review_request, publish=True)
items = [
self.create_diff_comment(review, filediff),
]
else:
items = []
return (get_filediff_comment_list_url(filediff, local_site_name),
filediff_comment_list_mimetype,
items)
def test_get_as_anonymous(self):
"""Testing the
GET review-requests/<id>/diffs/<revision>/files/<id>/diff-comments/ API
as an anonymous user
"""
diff_comment_text = 'Sample comment.'
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, publish=True)
comment = self.create_diff_comment(review, filediff,
text=diff_comment_text)
self.client.logout()
rsp = self.api_get(get_filediff_comment_list_url(filediff),
expected_mimetype=filediff_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(len(rsp['diff_comments']), 1)
self.assertEqual(rsp['diff_comments'][0]['text'], comment.text)
def test_get_with_line(self):
"""Testing the
GET review-requests/<id>/diffs/<revision>/files/<id>/diff-comments/ API
with ?line=
"""
diff_comment_text = 'Sample comment.'
diff_comment_line = 10
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, publish=True)
self.create_diff_comment(review, filediff,
text=diff_comment_text,
first_line=diff_comment_line)
self.create_diff_comment(review, filediff,
first_line=diff_comment_line + 1)
rsp = self.api_get(get_filediff_comment_list_url(filediff), {
'line': diff_comment_line,
}, expected_mimetype=filediff_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(len(rsp['diff_comments']), 1)
self.assertEqual(rsp['diff_comments'][0]['text'], diff_comment_text)
self.assertEqual(rsp['diff_comments'][0]['first_line'],
diff_comment_line)
# Satisfy the linter check. This resource is a list only, and doesn't
# support items.
ResourceItemTests = None
|
mit
|
SINGROUP/pycp2k
|
pycp2k/classes/_each162.py
|
1
|
1114
|
from pycp2k.inputsection import InputSection
class _each162(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
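# Illustrative use, assuming the usual pycp2k pattern of assigning keyword attributes
# on a section object: ``each = _each162(); each.Md = 1`` would emit ``MD 1`` inside
# the generated EACH input section.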
|
lgpl-3.0
|
tony/kivy
|
kivy/input/provider.py
|
23
|
1082
|
'''
Motion Event Provider
=====================
Abstract class for the implementation of a
:class:`~kivy.input.motionevent.MotionEvent`
provider. The implementation must support the
:meth:`~MotionEventProvider.start`, :meth:`~MotionEventProvider.stop` and
:meth:`~MotionEventProvider.update` methods.
'''
__all__ = ('MotionEventProvider', )
class MotionEventProvider(object):
'''Base class for a provider.
'''
def __init__(self, device, args):
self.device = device
if self.__class__ == MotionEventProvider:
raise NotImplementedError('class MotionEventProvider is abstract')
def start(self):
'''Start the provider. This method is automatically called when the
application is started and if the configuration uses the current
provider.
'''
pass
def stop(self):
'''Stop the provider.
'''
pass
def update(self, dispatch_fn):
        '''Update the provider and dispatch all the new touch events through the
`dispatch_fn` argument.
'''
pass
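# A minimal sketch of a concrete provider; the class name, the list-based queue and
# the (etype, motionevent) tuple shape are illustrative assumptions, not Kivy API.
class _ExampleQueuedProvider(MotionEventProvider):
    '''Illustrative provider that drains a plain list of pending events.'''
    def start(self):
        self.queue = []
    def update(self, dispatch_fn):
        # Hand each queued (etype, motionevent) pair to the event loop, then clear.
        for etype, event in self.queue:
            dispatch_fn(etype, event)
        del self.queue[:]
    def stop(self):
        self.queue = []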
|
mit
|
Flexget/Flexget
|
flexget/plugins/daemon/web_server.py
|
3
|
3903
|
from loguru import logger
from flexget.api import api_app
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.ui.v1 import register_web_ui as register_web_ui_v1
from flexget.ui.v2 import register_web_ui as register_web_ui_v2
from flexget.utils.tools import get_config_hash
from flexget.webserver import get_secret, register_app, setup_server
logger = logger.bind(name="web_server_daemon")
config_hash = ''
web_server = None
web_config_schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'integer', 'minimum': 0, 'maximum': 65536},
{
'type': 'object',
'properties': {
'bind': {'type': 'string', 'format': 'ipv4'},
'port': {'type': 'integer', 'minimum': 0, 'maximum': 65536},
'ssl_certificate': {'type': 'string'},
'ssl_private_key': {'type': 'string'},
'web_ui': {'type': 'boolean'},
'base_url': {'type': 'string'},
'run_v2': {
'type': 'boolean',
                    'deprecated': 'v2 is registered by default if `web_ui: true`, so `run_v2` is now redundant. To run v1 alongside it, use the `run_v1` option.',
},
'run_v1': {'type': 'boolean'},
},
'additionalProperties': False,
'dependencies': {
'ssl_certificate': ['ssl_private_key'],
'ssl_private_key': ['ssl_certificate'],
},
},
]
}
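# An example ``web_server`` entry accepted by the schema above (illustrative values):
#
#   web_server:
#     bind: 0.0.0.0
#     port: 5050
#     web_ui: yes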
def prepare_config(config):
if not config:
return
if isinstance(config, bool):
config = {}
if isinstance(config, int):
config = {'port': config}
config.setdefault('bind', '0.0.0.0')
config.setdefault('port', 5050)
config.setdefault('ssl_certificate', None)
config.setdefault('ssl_private_key', None)
config.setdefault('web_ui', True)
config.setdefault('base_url', '')
config.setdefault('run_v2', False)
config.setdefault('run_v1', False)
if config['base_url']:
if not config['base_url'].startswith('/'):
config['base_url'] = '/' + config['base_url']
if config['base_url'].endswith('/'):
config['base_url'] = config['base_url'][:-1]
return config
@event('config.register')
def register_config():
register_config_key('web_server', web_config_schema)
@event('manager.config_updated')
@event('manager.daemon.started')
def register_web_server(manager):
"""Registers Web Server and loads API (always) and WebUi via config"""
global web_server, config_hash
if not manager.is_daemon:
return
config = manager.config.get('web_server')
if get_config_hash(config) == config_hash:
        logger.debug('web server config hasn\'t changed')
return
config_hash = get_config_hash(config)
web_server_config = prepare_config(config)
# Removes any existing web server instances if exists
stop_server(manager)
if not web_server_config:
return
logger.info(
'Running web server at IP {}:{}', web_server_config['bind'], web_server_config['port']
)
# Register API
api_app.secret_key = get_secret()
logger.info("Initiating API")
register_app('/api', api_app, 'API')
# Register WebUI
if web_server_config.get('web_ui'):
if web_server_config.get('run_v1'):
logger.info('Registering WebUI v1')
register_web_ui_v1(manager)
logger.info('Registering WebUI v2')
register_web_ui_v2(web_server_config)
web_server = setup_server(web_server_config)
@event('manager.shutdown')
def stop_server(manager):
""" Sets up and starts/restarts the webui. """
global web_server
if not manager.is_daemon:
return
if web_server and web_server.is_alive():
web_server.stop()
web_server = None
|
mit
|
ghmajx/asuswrt-merlin
|
release/src/router/samba-3.6.13/source4/scripting/python/samba/tests/samba3.py
|
20
|
8485
|
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.samba3."""
from samba.samba3 import (GroupMappingDatabase, Registry, PolicyDatabase,
SecretsDatabase, TdbSam)
from samba.samba3 import (WinsDatabase, SmbpasswdFile, ACB_NORMAL,
IdmapDatabase, SAMUser, ParamFile)
from samba.tests import TestCase
import os
for p in [ "../../../../../testdata/samba3", "../../../../testdata/samba3" ]:
DATADIR = os.path.join(os.path.dirname(__file__), p)
if os.path.exists(DATADIR):
break
class RegistryTestCase(TestCase):
def setUp(self):
super(RegistryTestCase, self).setUp()
self.registry = Registry(os.path.join(DATADIR, "registry.tdb"))
def tearDown(self):
self.registry.close()
super(RegistryTestCase, self).tearDown()
def test_length(self):
self.assertEquals(28, len(self.registry))
def test_keys(self):
self.assertTrue("HKLM" in self.registry.keys())
def test_subkeys(self):
self.assertEquals(["SOFTWARE", "SYSTEM"], self.registry.subkeys("HKLM"))
def test_values(self):
self.assertEquals({'DisplayName': (1L, 'E\x00v\x00e\x00n\x00t\x00 \x00L\x00o\x00g\x00\x00\x00'),
'ErrorControl': (4L, '\x01\x00\x00\x00')},
self.registry.values("HKLM/SYSTEM/CURRENTCONTROLSET/SERVICES/EVENTLOG"))
class PolicyTestCase(TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
self.policy = PolicyDatabase(os.path.join(DATADIR, "account_policy.tdb"))
def test_policy(self):
self.assertEquals(self.policy.min_password_length, 5)
self.assertEquals(self.policy.minimum_password_age, 0)
self.assertEquals(self.policy.maximum_password_age, 999999999)
self.assertEquals(self.policy.refuse_machine_password_change, 0)
self.assertEquals(self.policy.reset_count_minutes, 0)
self.assertEquals(self.policy.disconnect_time, -1)
self.assertEquals(self.policy.user_must_logon_to_change_password, None)
self.assertEquals(self.policy.password_history, 0)
self.assertEquals(self.policy.lockout_duration, 0)
self.assertEquals(self.policy.bad_lockout_minutes, None)
class GroupsTestCase(TestCase):
def setUp(self):
super(GroupsTestCase, self).setUp()
self.groupdb = GroupMappingDatabase(os.path.join(DATADIR, "group_mapping.tdb"))
def tearDown(self):
self.groupdb.close()
super(GroupsTestCase, self).tearDown()
def test_group_length(self):
self.assertEquals(13, len(list(self.groupdb.groupsids())))
def test_get_group(self):
self.assertEquals((-1, 5L, 'Administrators', ''), self.groupdb.get_group("S-1-5-32-544"))
def test_groupsids(self):
sids = list(self.groupdb.groupsids())
self.assertTrue("S-1-5-32-544" in sids)
def test_alias_length(self):
self.assertEquals(0, len(list(self.groupdb.aliases())))
class SecretsDbTestCase(TestCase):
def setUp(self):
super(SecretsDbTestCase, self).setUp()
self.secretsdb = SecretsDatabase(os.path.join(DATADIR, "secrets.tdb"))
def tearDown(self):
self.secretsdb.close()
super(SecretsDbTestCase, self).tearDown()
def test_get_sid(self):
self.assertTrue(self.secretsdb.get_sid("BEDWYR") is not None)
class TdbSamTestCase(TestCase):
def setUp(self):
super(TdbSamTestCase, self).setUp()
self.samdb = TdbSam(os.path.join(DATADIR, "passdb.tdb"))
def tearDown(self):
self.samdb.close()
super(TdbSamTestCase, self).tearDown()
def test_usernames(self):
self.assertEquals(3, len(list(self.samdb.usernames())))
def test_getuser(self):
user = SAMUser("root")
user.logoff_time = 2147483647
user.kickoff_time = 2147483647
user.pass_can_change_time = 1125418267
user.username = "root"
user.uid = None
user.lm_password = 'U)\x02\x03\x1b\xed\xe9\xef\xaa\xd3\xb45\xb5\x14\x04\xee'
user.nt_password = '\x87\x8d\x80\x14`l\xda)gzD\xef\xa15?\xc7'
user.acct_ctrl = 16
user.pass_last_set_time = 1125418267
user.fullname = "root"
user.nt_username = ""
user.logoff_time = 2147483647
user.acct_desc = ""
user.group_rid = 1001
user.logon_count = 0
user.bad_password_count = 0
user.domain = "BEDWYR"
user.munged_dial = ""
user.workstations = ""
user.user_rid = 1000
user.kickoff_time = 2147483647
user.logoff_time = 2147483647
user.unknown_6 = 1260L
user.logon_divs = 0
user.hours = [True for i in range(168)]
other = self.samdb["root"]
for name in other.__dict__:
if other.__dict__[name] != user.__dict__[name]:
print "%s: %r != %r" % (name, other.__dict__[name], user.__dict__[name])
self.assertEquals(user, other)
class WinsDatabaseTestCase(TestCase):
def setUp(self):
super(WinsDatabaseTestCase, self).setUp()
self.winsdb = WinsDatabase(os.path.join(DATADIR, "wins.dat"))
def test_length(self):
self.assertEquals(22, len(self.winsdb))
def test_first_entry(self):
self.assertEqual((1124185120, ["192.168.1.5"], 0x64), self.winsdb["ADMINISTRATOR#03"])
def tearDown(self):
self.winsdb.close()
super(WinsDatabaseTestCase, self).tearDown()
class SmbpasswdTestCase(TestCase):
def setUp(self):
super(SmbpasswdTestCase, self).setUp()
self.samdb = SmbpasswdFile(os.path.join(DATADIR, "smbpasswd"))
def test_length(self):
self.assertEquals(3, len(self.samdb))
def test_get_user(self):
user = SAMUser("rootpw")
user.lm_password = "552902031BEDE9EFAAD3B435B51404EE"
user.nt_password = "878D8014606CDA29677A44EFA1353FC7"
user.acct_ctrl = ACB_NORMAL
user.pass_last_set_time = int(1125418267)
user.uid = 0
self.assertEquals(user, self.samdb["rootpw"])
def tearDown(self):
self.samdb.close()
super(SmbpasswdTestCase, self).tearDown()
class IdmapDbTestCase(TestCase):
def setUp(self):
super(IdmapDbTestCase, self).setUp()
self.idmapdb = IdmapDatabase(os.path.join(DATADIR,
"winbindd_idmap.tdb"))
def test_user_hwm(self):
self.assertEquals(10000, self.idmapdb.get_user_hwm())
def test_group_hwm(self):
self.assertEquals(10002, self.idmapdb.get_group_hwm())
def test_uids(self):
self.assertEquals(1, len(list(self.idmapdb.uids())))
def test_gids(self):
self.assertEquals(3, len(list(self.idmapdb.gids())))
def test_get_user_sid(self):
self.assertEquals("S-1-5-21-58189338-3053988021-627566699-501", self.idmapdb.get_user_sid(65534))
def test_get_group_sid(self):
self.assertEquals("S-1-5-21-2447931902-1787058256-3961074038-3007", self.idmapdb.get_group_sid(10001))
def tearDown(self):
self.idmapdb.close()
super(IdmapDbTestCase, self).tearDown()
class ParamTestCase(TestCase):
def test_init(self):
file = ParamFile()
self.assertTrue(file is not None)
def test_add_section(self):
file = ParamFile()
file.add_section("global")
self.assertTrue(file["global"] is not None)
def test_set_param_string(self):
file = ParamFile()
file.add_section("global")
file.set_string("data", "bar")
self.assertEquals("bar", file.get_string("data"))
def test_get_section(self):
file = ParamFile()
self.assertEquals(None, file.get_section("unknown"))
self.assertRaises(KeyError, lambda: file["unknown"])
|
gpl-2.0
|
rmfitzpatrick/ansible
|
lib/ansible/modules/network/netscaler/netscaler_ssl_certkey.py
|
27
|
11840
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_ssl_certkey
short_description: Manage SSL certificate keys.
description:
    - Manage SSL certificate keys.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
certkey:
description:
- >-
Name for the certificate and private-key pair. Must begin with an ASCII alphanumeric or underscore
C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ),
colon C(:), at C(@), equals C(=), and hyphen C(-) characters. Cannot be changed after the certificate-key
pair is created.
- "The following requirement applies only to the NetScaler CLI:"
- >-
If the name includes one or more spaces, enclose the name in double or single quotation marks (for
example, "my cert" or 'my cert').
- "Minimum length = 1"
cert:
description:
- >-
Name of and, optionally, path to the X509 certificate file that is used to form the certificate-key
pair. The certificate file should be present on the appliance's hard-disk drive or solid-state drive.
Storing a certificate in any location other than the default might cause inconsistency in a high
availability setup. /nsconfig/ssl/ is the default path.
- "Minimum length = 1"
key:
description:
- >-
Name of and, optionally, path to the private-key file that is used to form the certificate-key pair.
The certificate file should be present on the appliance's hard-disk drive or solid-state drive.
Storing a certificate in any location other than the default might cause inconsistency in a high
availability setup. /nsconfig/ssl/ is the default path.
- "Minimum length = 1"
password:
description:
- >-
Passphrase that was used to encrypt the private-key. Use this option to load encrypted private-keys
in PEM format.
inform:
choices:
- 'DER'
- 'PEM'
- 'PFX'
description:
- >-
Input format of the certificate and the private-key files. The three formats supported by the
appliance are:
- "PEM - Privacy Enhanced Mail"
- "DER - Distinguished Encoding Rule"
- "PFX - Personal Information Exchange."
passplain:
description:
- >-
Pass phrase used to encrypt the private-key. Required when adding an encrypted private-key in PEM
format.
- "Minimum length = 1"
expirymonitor:
choices:
- 'enabled'
- 'disabled'
description:
- "Issue an alert when the certificate is about to expire."
notificationperiod:
description:
- >-
Time, in number of days, before certificate expiration, at which to generate an alert that the
certificate is about to expire.
- "Minimum value = C(10)"
- "Maximum value = C(100)"
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup ssl certkey
delegate_to: localhost
netscaler_ssl_certkey:
nitro_user: nsroot
nitro_pass: nsroot
nsip: 172.18.0.2
certkey: certirificate_1
cert: server.crt
key: server.key
expirymonitor: enabled
notificationperiod: 30
inform: PEM
password: False
passplain: somesecret
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: string
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dictionary
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslcertkey import sslcertkey
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection
def key_exists(client, module):
log('Checking if key exists')
log('certkey is %s' % module.params['certkey'])
all_certificates = sslcertkey.get(client)
certkeys = [item.certkey for item in all_certificates]
if module.params['certkey'] in certkeys:
return True
else:
return False
def key_identical(client, module, sslcertkey_proxy):
log('Checking if configured key is identical')
sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey'])
diff_dict = sslcertkey_proxy.diff_object(sslcertkey_list[0])
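    # password and passplain are write-only inputs that the appliance does not echo
    # back (assumed rationale), so exclude them from the comparison below.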
if 'password' in diff_dict:
del diff_dict['password']
if 'passplain' in diff_dict:
del diff_dict['passplain']
if len(diff_dict) == 0:
return True
else:
return False
def diff_list(client, module, sslcertkey_proxy):
sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey'])
return sslcertkey_proxy.diff_object(sslcertkey_list[0])
def main():
module_specific_arguments = dict(
certkey=dict(type='str'),
cert=dict(type='str'),
key=dict(type='str'),
password=dict(type='bool'),
inform=dict(
type='str',
choices=[
'DER',
'PEM',
'PFX',
]
),
passplain=dict(
type='str',
no_log=True,
),
expirymonitor=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
notificationperiod=dict(type='float'),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
readwrite_attrs = [
'certkey',
'cert',
'key',
'password',
'inform',
'passplain',
'expirymonitor',
'notificationperiod',
]
readonly_attrs = [
'signaturealg',
'certificatetype',
'serial',
'issuer',
'clientcertnotbefore',
'clientcertnotafter',
'daystoexpiration',
'subject',
'publickey',
'publickeysize',
'version',
'priority',
'status',
'passcrypt',
'data',
'servicename',
]
immutable_attrs = [
'certkey',
'cert',
'key',
'password',
'inform',
'passplain',
]
transforms = {
'expirymonitor': [lambda v: v.upper()],
}
# Instantiate config proxy
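    # The proxy wraps a NITRO sslcertkey resource together with the module params and
    # the attribute lists above, so the add()/update()/diff_object() calls below know
    # which fields are writable, read-only, or immutable after creation.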
sslcertkey_proxy = ConfigProxy(
actual=sslcertkey(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
if module.params['state'] == 'present':
log('Applying actions for state present')
if not key_exists(client, module):
if not module.check_mode:
log('Adding certificate key')
sslcertkey_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not key_identical(client, module, sslcertkey_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(sslcertkey_proxy, diff_list(client, module, sslcertkey_proxy).keys())
if immutables_changed != []:
module.fail_json(
msg='Cannot update immutable attributes %s' % (immutables_changed,),
diff=diff_list(client, module, sslcertkey_proxy),
**module_result
)
if not module.check_mode:
sslcertkey_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state present')
if not key_exists(client, module):
module.fail_json(msg='SSL certkey does not exist')
if not key_identical(client, module, sslcertkey_proxy):
module.fail_json(msg='SSL certkey differs from configured', diff=diff_list(client, module, sslcertkey_proxy))
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if key_exists(client, module):
if not module.check_mode:
sslcertkey_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if key_exists(client, module):
module.fail_json(msg='SSL certkey still exists')
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
gpl-3.0
|
sergiorua/libcloud
|
libcloud/compute/drivers/voxel.py
|
58
|
10960
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Voxel VoxCloud driver
"""
import datetime
import hashlib
from libcloud.utils.py3 import b
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
VOXEL_API_HOST = "api.voxel.net"
class VoxelResponse(XmlResponse):
def __init__(self, response, connection):
self.parsed = None
super(VoxelResponse, self).__init__(response=response,
connection=connection)
def parse_body(self):
if not self.body:
return None
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
return self.parsed
def parse_error(self):
err_list = []
if not self.body:
return None
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
for err in self.parsed.findall('err'):
code = err.get('code')
err_list.append("(%s) %s" % (code, err.get('msg')))
# From voxel docs:
# 1: Invalid login or password
# 9: Permission denied: user lacks access rights for this method
if code == "1" or code == "9":
# sucks, but only way to detect
# bad authentication tokens so far
raise InvalidCredsError(err_list[-1])
return "\n".join(err_list)
def success(self):
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
stat = self.parsed.get('stat')
if stat != "ok":
return False
return True
class VoxelConnection(ConnectionUserAndKey):
"""
Connection class for the Voxel driver
"""
host = VOXEL_API_HOST
responseCls = VoxelResponse
def add_default_params(self, params):
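        # Voxel's API signs each request: every non-None parameter plus the API key
        # and a UTC timestamp is concatenated in sorted-key order onto the shared
        # secret and hashed with MD5; the digest is sent as the 'api_sig' parameter.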
params = dict([(k, v) for k, v in list(params.items())
if v is not None])
params["key"] = self.user_id
params["timestamp"] = datetime.datetime.utcnow().isoformat() + "+0000"
keys = list(params.keys())
keys.sort()
md5 = hashlib.md5()
md5.update(b(self.key))
for key in keys:
            # None values were filtered out above, so only truthy values
            # contribute to the signature; falsy values are skipped.
            if params[key]:
                md5.update(b("%s%s" % (key, params[key])))
params['api_sig'] = md5.hexdigest()
return params
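# Signature sketch (illustrative only, values hypothetical): the api_sig above is
# the MD5 hex digest of the connection secret (self.key) followed by each sorted,
# non-empty parameter concatenated as "namevalue" with no separators, roughly
#   md5(secret + "key<USER_ID>" + "methodvoxel.devices.list"
#       + "timestamp2014-01-01T00:00:00+0000").hexdigest()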
VOXEL_INSTANCE_TYPES = {}
RAM_PER_CPU = 2048
NODE_STATE_MAP = {
'IN_PROGRESS': NodeState.PENDING,
'QUEUED': NodeState.PENDING,
'SUCCEEDED': NodeState.RUNNING,
'shutting-down': NodeState.TERMINATED,
'terminated': NodeState.TERMINATED,
'unknown': NodeState.UNKNOWN,
}
class VoxelNodeDriver(NodeDriver):
"""
Voxel VoxCLOUD node driver
"""
connectionCls = VoxelConnection
type = Provider.VOXEL
name = 'Voxel VoxCLOUD'
website = 'http://www.voxel.net/'
def _initialize_instance_types():
for cpus in range(1, 14):
if cpus == 1:
name = "Single CPU"
else:
name = "%d CPUs" % cpus
id = "%dcpu" % cpus
ram = cpus * RAM_PER_CPU
VOXEL_INSTANCE_TYPES[id] = {
'id': id,
'name': name,
'ram': ram,
'disk': None,
'bandwidth': None,
'price': None}
features = {"create_node": [],
"list_sizes": ["variable_disk"]}
_initialize_instance_types()
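    # Populated at class-definition time: e.g. id "2cpu" maps to
    # {"id": "2cpu", "name": "2 CPUs", "ram": 4096, ...} since RAM_PER_CPU is 2048 MB.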
def list_nodes(self):
params = {"method": "voxel.devices.list"}
result = self.connection.request('/', params=params).object
return self._to_nodes(result)
def list_sizes(self, location=None):
return [NodeSize(driver=self.connection.driver, **i)
for i in list(VOXEL_INSTANCE_TYPES.values())]
def list_images(self, location=None):
params = {"method": "voxel.images.list"}
result = self.connection.request('/', params=params).object
return self._to_images(result)
def create_node(self, **kwargs):
"""Create Voxel Node
:keyword name: the name to assign the node (mandatory)
:type name: ``str``
:keyword image: distribution to deploy
:type image: :class:`NodeImage`
:keyword size: the plan size to create (mandatory)
Requires size.disk (GB) to be set manually
:type size: :class:`NodeSize`
:keyword location: which datacenter to create the node in
:type location: :class:`NodeLocation`
:keyword ex_privateip: Backend IP address to assign to node;
must be chosen from the customer's
private VLAN assignment.
:type ex_privateip: ``str``
:keyword ex_publicip: Public-facing IP address to assign to node;
must be chosen from the customer's
public VLAN assignment.
:type ex_publicip: ``str``
:keyword ex_rootpass: Password for root access; generated if unset.
:type ex_rootpass: ``str``
:keyword ex_consolepass: Password for remote console;
generated if unset.
:type ex_consolepass: ``str``
:keyword ex_sshuser: Username for SSH access
:type ex_sshuser: ``str``
:keyword ex_sshpass: Password for SSH access; generated if unset.
:type ex_sshpass: ``str``
        :keyword ex_voxel_access: Allow Voxel administrative access.
Defaults to False.
:type ex_voxel_access: ``bool``
:rtype: :class:`Node` or ``None``
"""
# assert that disk > 0
if not kwargs["size"].disk:
raise ValueError("size.disk must be non-zero")
# convert voxel_access to string boolean if needed
voxel_access = kwargs.get("ex_voxel_access", None)
if voxel_access is not None:
voxel_access = "true" if voxel_access else "false"
params = {
'method': 'voxel.voxcloud.create',
'hostname': kwargs["name"],
'disk_size': int(kwargs["size"].disk),
'facility': kwargs["location"].id,
'image_id': kwargs["image"].id,
'processing_cores': kwargs["size"].ram / RAM_PER_CPU,
'backend_ip': kwargs.get("ex_privateip", None),
'frontend_ip': kwargs.get("ex_publicip", None),
'admin_password': kwargs.get("ex_rootpass", None),
'console_password': kwargs.get("ex_consolepass", None),
'ssh_username': kwargs.get("ex_sshuser", None),
'ssh_password': kwargs.get("ex_sshpass", None),
'voxel_access': voxel_access,
}
object = self.connection.request('/', params=params).object
if self._getstatus(object):
return Node(
id=object.findtext("device/id"),
name=kwargs["name"],
state=NODE_STATE_MAP[object.findtext("device/status")],
public_ips=kwargs.get("publicip", None),
private_ips=kwargs.get("privateip", None),
driver=self.connection.driver
)
else:
return None
def reboot_node(self, node):
params = {'method': 'voxel.devices.power',
'device_id': node.id,
'power_action': 'reboot'}
return self._getstatus(
self.connection.request('/', params=params).object)
def destroy_node(self, node):
params = {'method': 'voxel.voxcloud.delete',
'device_id': node.id}
return self._getstatus(
self.connection.request('/', params=params).object)
def list_locations(self):
params = {"method": "voxel.voxcloud.facilities.list"}
result = self.connection.request('/', params=params).object
nodes = self._to_locations(result)
return nodes
def _getstatus(self, element):
status = element.attrib["stat"]
return status == "ok"
def _to_locations(self, object):
return [NodeLocation(element.attrib["label"],
element.findtext("description"),
element.findtext("description"),
self)
for element in object.findall('facilities/facility')]
def _to_nodes(self, object):
nodes = []
for element in object.findall('devices/device'):
if element.findtext("type") == "Virtual Server":
try:
                    state = NODE_STATE_MAP[element.attrib['status']]
except KeyError:
state = NodeState.UNKNOWN
public_ip = private_ip = None
ipassignments = element.findall("ipassignments/ipassignment")
for ip in ipassignments:
if ip.attrib["type"] == "frontend":
public_ip = ip.text
elif ip.attrib["type"] == "backend":
private_ip = ip.text
nodes.append(Node(id=element.attrib['id'],
name=element.attrib['label'],
state=state,
public_ips=public_ip,
private_ips=private_ip,
driver=self.connection.driver))
return nodes
def _to_images(self, object):
images = []
for element in object.findall("images/image"):
images.append(NodeImage(id=element.attrib["id"],
name=element.attrib["summary"],
driver=self.connection.driver))
return images
|
apache-2.0
|
poldracklab/fmriprep
|
fmriprep/cli/parser.py
|
1
|
24895
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Parser."""
import sys
from .. import config
def _build_parser():
"""Build parser object."""
from functools import partial
from pathlib import Path
from argparse import (
ArgumentParser,
ArgumentDefaultsHelpFormatter,
)
from packaging.version import Version
from .version import check_latest, is_flagged
from niworkflows.utils.spaces import Reference, OutputReferencesAction
def _path_exists(path, parser):
"""Ensure a given path exists."""
if path is None or not Path(path).exists():
raise parser.error(f"Path does not exist: <{path}>.")
return Path(path).absolute()
def _is_file(path, parser):
"""Ensure a given path exists and it is a file."""
path = _path_exists(path, parser)
if not path.is_file():
raise parser.error(f"Path should point to a file (or symlink of file): <{path}>.")
return path
def _min_one(value, parser):
"""Ensure an argument is not lower than 1."""
value = int(value)
if value < 1:
raise parser.error("Argument can't be less than one.")
return value
def _to_gb(value):
scale = {"G": 1, "T": 10 ** 3, "M": 1e-3, "K": 1e-6, "B": 1e-9}
digits = "".join([c for c in value if c.isdigit()])
units = value[len(digits):] or "M"
return int(digits) * scale[units[0]]
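    # Worked example (values illustrative): "8G" -> 8, "16384M" -> 16.384,
    # and a bare "2048" defaults to megabytes -> 2.048 (GB).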
def _drop_sub(value):
return value[4:] if value.startswith("sub-") else value
def _filter_pybids_none_any(dct):
import bids
return {
k: bids.layout.Query.NONE
if v is None
else (bids.layout.Query.ANY if v == "*" else v)
for k, v in dct.items()
}
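    # For instance, a hypothetical filter {"session": None, "task": "*"} becomes
    # {"session": Query.NONE, "task": Query.ANY}; ordinary values pass through unchanged.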
def _bids_filter(value):
from json import loads
if value and Path(value).exists():
return loads(Path(value).read_text(), object_hook=_filter_pybids_none_any)
verstr = f"fMRIPrep v{config.environment.version}"
currentv = Version(config.environment.version)
is_release = not any(
(currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease)
)
parser = ArgumentParser(
description="fMRIPrep: fMRI PREProcessing workflows v{}".format(
config.environment.version
),
formatter_class=ArgumentDefaultsHelpFormatter,
)
PathExists = partial(_path_exists, parser=parser)
IsFile = partial(_is_file, parser=parser)
PositiveInt = partial(_min_one, parser=parser)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument(
"bids_dir",
action="store",
type=PathExists,
help="the root folder of a BIDS valid dataset (sub-XXXXX folders should "
"be found at the top level in this folder).",
)
parser.add_argument(
"output_dir",
action="store",
type=Path,
help="the output path for the outcomes of preprocessing and visual " "reports",
)
parser.add_argument(
"analysis_level",
choices=["participant"],
help='processing stage to be run, only "participant" in the case of '
"fMRIPrep (see BIDS-Apps specification).",
)
# optional arguments
parser.add_argument("--version", action="version", version=verstr)
g_bids = parser.add_argument_group("Options for filtering BIDS queries")
g_bids.add_argument(
"--skip_bids_validation",
"--skip-bids-validation",
action="store_true",
default=False,
help="assume the input dataset is BIDS compliant and skip the validation",
)
g_bids.add_argument(
"--participant-label",
"--participant_label",
action="store",
nargs="+",
type=_drop_sub,
help="a space delimited list of participant identifiers or a single "
"identifier (the sub- prefix can be removed)",
)
# Re-enable when option is actually implemented
# g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
# help='select a specific session to be processed')
# Re-enable when option is actually implemented
# g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
# help='select a specific run to be processed')
g_bids.add_argument(
"-t", "--task-id", action="store", help="select a specific task to be processed"
)
g_bids.add_argument(
"--echo-idx",
action="store",
type=int,
help="select a specific echo to be processed in a multiecho series",
)
g_bids.add_argument(
"--bids-filter-file",
dest="bids_filters",
action="store",
type=_bids_filter,
metavar="FILE",
help="a JSON file describing custom BIDS input filters using PyBIDS. "
"For further details, please check out "
"https://fmriprep.readthedocs.io/en/%s/faq.html#"
"how-do-I-select-only-certain-files-to-be-input-to-fMRIPrep"
% (currentv.base_version if is_release else "latest"),
)
g_bids.add_argument(
"--anat-derivatives",
action="store",
metavar="PATH",
type=PathExists,
help="Reuse the anatomical derivatives from another fMRIPrep run or calculated "
"with an alternative processing tool (NOT RECOMMENDED).",
)
g_bids.add_argument(
"--bids-database-dir",
metavar="PATH",
type=PathExists,
help="Path to an existing PyBIDS database folder, for faster indexing "
"(especially useful for large datasets)."
)
g_perfm = parser.add_argument_group("Options to handle performance")
g_perfm.add_argument(
"--nprocs",
"--nthreads",
"--n_cpus",
"--n-cpus",
dest='nprocs',
action="store",
type=PositiveInt,
help="maximum number of threads across all processes",
)
g_perfm.add_argument(
"--omp-nthreads",
action="store",
type=PositiveInt,
help="maximum number of threads per-process",
)
g_perfm.add_argument(
"--mem",
"--mem_mb",
"--mem-mb",
dest="memory_gb",
action="store",
type=_to_gb,
help="upper bound memory limit for fMRIPrep processes",
)
g_perfm.add_argument(
"--low-mem",
action="store_true",
help="attempt to reduce memory usage (will increase disk usage "
"in working directory)",
)
g_perfm.add_argument(
"--use-plugin",
"--nipype-plugin-file",
action="store",
metavar="FILE",
type=IsFile,
help="nipype plugin configuration file",
)
g_perfm.add_argument(
"--anat-only", action="store_true", help="run anatomical workflows only"
)
g_perfm.add_argument(
"--boilerplate_only",
action="store_true",
default=False,
help="generate boilerplate only",
)
g_perfm.add_argument(
"--md-only-boilerplate",
action="store_true",
default=False,
help="skip generation of HTML and LaTeX formatted citation with pandoc",
)
g_perfm.add_argument(
"--error-on-aroma-warnings",
action="store_true",
dest="aroma_err_on_warn",
default=False,
help="Raise an error if ICA_AROMA does not produce sensible output "
"(e.g., if all the components are classified as signal or noise)",
)
g_perfm.add_argument(
"-v",
"--verbose",
dest="verbose_count",
action="count",
default=0,
help="increases log verbosity for each occurence, debug level is -vvv",
)
g_conf = parser.add_argument_group("Workflow configuration")
g_conf.add_argument(
"--ignore",
required=False,
action="store",
nargs="+",
default=[],
choices=["fieldmaps", "slicetiming", "sbref", "t2w", "flair"],
help="ignore selected aspects of the input dataset to disable corresponding "
"parts of the workflow (a space delimited list)",
)
g_conf.add_argument(
"--longitudinal",
action="store_true",
help="treat dataset as longitudinal - may increase runtime",
)
g_conf.add_argument(
"--output-spaces",
nargs="*",
action=OutputReferencesAction,
help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<SPACE>[:cohort-<label>][:res-<resolution>][...]``, where ``<SPACE>`` is \
a keyword designating a spatial reference, and may be followed by optional, \
colon-separated parameters. \
Non-standard spaces imply specific orientations and sampling grids. \
Important to note, the ``res-*`` modifier does not define the resolution used for \
the spatial normalization. To generate no BOLD outputs, use this option without specifying \
any spatial references. For further details, please check out \
https://fmriprep.readthedocs.io/en/%s/spaces.html"""
% (currentv.base_version if is_release else "latest"),
)
g_conf.add_argument(
"--bold2t1w-init",
action="store",
default="register",
choices=["register", "header"],
help='Either "register" (the default) to initialize volumes at center or "header"'
" to use the header information when coregistering BOLD to T1w images.",
)
g_conf.add_argument(
"--bold2t1w-dof",
action="store",
default=6,
choices=[6, 9, 12],
type=int,
help="Degrees of freedom when registering BOLD to T1w images. "
"6 degrees (rotation and translation) are used by default.",
)
g_conf.add_argument(
"--force-bbr",
action="store_true",
dest="use_bbr",
default=None,
help="Always use boundary-based registration (no goodness-of-fit checks)",
)
g_conf.add_argument(
"--force-no-bbr",
action="store_false",
dest="use_bbr",
default=None,
help="Do not use boundary-based registration (no goodness-of-fit checks)",
)
g_conf.add_argument(
"--medial-surface-nan",
required=False,
action="store_true",
default=False,
help="Replace medial wall values with NaNs on functional GIFTI files. Only "
"performed for GIFTI files mapped to a freesurfer subject (fsaverage or fsnative).",
)
g_conf.add_argument(
"--dummy-scans",
required=False,
action="store",
default=None,
type=int,
help="Number of non steady state volumes.",
)
g_conf.add_argument(
"--random-seed",
dest="_random_seed",
action="store",
type=int,
default=None,
help="Initialize the random seed for the workflow",
)
# ICA_AROMA options
g_aroma = parser.add_argument_group("Specific options for running ICA_AROMA")
g_aroma.add_argument(
"--use-aroma",
action="store_true",
default=False,
help="add ICA_AROMA to your preprocessing stream",
)
g_aroma.add_argument(
"--aroma-melodic-dimensionality",
dest="aroma_melodic_dim",
action="store",
default=-200,
type=int,
help="Exact or maximum number of MELODIC components to estimate "
"(positive = exact, negative = maximum)",
)
# Confounds options
g_confounds = parser.add_argument_group("Specific options for estimating confounds")
g_confounds.add_argument(
"--return-all-components",
dest="regressors_all_comps",
required=False,
action="store_true",
default=False,
help="Include all components estimated in CompCor decomposition in the confounds "
"file instead of only the components sufficient to explain 50 percent of "
"BOLD variance in each CompCor mask",
)
g_confounds.add_argument(
"--fd-spike-threshold",
dest="regressors_fd_th",
required=False,
action="store",
default=0.5,
type=float,
help="Threshold for flagging a frame as an outlier on the basis of framewise "
"displacement",
)
g_confounds.add_argument(
"--dvars-spike-threshold",
dest="regressors_dvars_th",
required=False,
action="store",
default=1.5,
type=float,
help="Threshold for flagging a frame as an outlier on the basis of standardised "
"DVARS",
)
# ANTs options
g_ants = parser.add_argument_group("Specific options for ANTs registrations")
g_ants.add_argument(
"--skull-strip-template",
default="OASIS30ANTs",
type=Reference.from_string,
help="select a template for skull-stripping with antsBrainExtraction",
)
g_ants.add_argument(
"--skull-strip-fixed-seed",
action="store_true",
help="do not use a random seed for skull-stripping - will ensure "
"run-to-run replicability when used with --omp-nthreads 1 and "
"matching --random-seed <int>",
)
g_ants.add_argument(
"--skull-strip-t1w",
action="store",
choices=("auto", "skip", "force"),
default="force",
help="determiner for T1-weighted skull stripping ('force' ensures skull "
"stripping, 'skip' ignores skull stripping, and 'auto' applies brain extraction "
"based on the outcome of a heuristic to check whether the brain is already masked).",
)
# Fieldmap options
g_fmap = parser.add_argument_group("Specific options for handling fieldmaps")
g_fmap.add_argument(
"--fmap-bspline",
action="store_true",
default=False,
help="fit a B-Spline field using least-squares (experimental)",
)
g_fmap.add_argument(
"--fmap-no-demean",
action="store_false",
default=True,
help="do not remove median (within mask) from fieldmap",
)
# SyN-unwarp options
g_syn = parser.add_argument_group("Specific options for SyN distortion correction")
g_syn.add_argument(
"--use-syn-sdc",
action="store_true",
default=False,
help="EXPERIMENTAL: Use fieldmap-free distortion correction",
)
g_syn.add_argument(
"--force-syn",
action="store_true",
default=False,
help="EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to "
"fieldmap correction, if available",
)
# FreeSurfer options
g_fs = parser.add_argument_group("Specific options for FreeSurfer preprocessing")
g_fs.add_argument(
"--fs-license-file",
metavar="FILE",
type=IsFile,
help="Path to FreeSurfer license key file. Get it (for free) by registering"
" at https://surfer.nmr.mgh.harvard.edu/registration.html",
)
g_fs.add_argument(
"--fs-subjects-dir",
metavar="PATH",
type=Path,
help="Path to existing FreeSurfer subjects directory to reuse. "
"(default: OUTPUT_DIR/freesurfer)",
)
# Surface generation xor
g_surfs = parser.add_argument_group("Surface preprocessing options")
g_surfs.add_argument(
"--no-submm-recon",
action="store_false",
dest="hires",
help="disable sub-millimeter (hires) reconstruction",
)
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument(
"--cifti-output",
nargs="?",
const="91k",
default=False,
choices=("91k", "170k"),
type=str,
help="output preprocessed BOLD as a CIFTI dense timeseries. "
"Optionally, the number of grayordinate can be specified "
"(default is 91k, which equates to 2mm resolution)",
)
g_surfs_xor.add_argument(
"--fs-no-reconall",
action="store_false",
dest="run_reconall",
help="disable FreeSurfer surface preprocessing.",
)
g_other = parser.add_argument_group("Other options")
g_other.add_argument(
"-w",
"--work-dir",
action="store",
type=Path,
default=Path("work").absolute(),
help="path where intermediate results should be stored",
)
g_other.add_argument(
"--clean-workdir",
action="store_true",
default=False,
help="Clears working directory of contents. Use of this flag is not"
"recommended when running concurrent processes of fMRIPrep.",
)
g_other.add_argument(
"--resource-monitor",
action="store_true",
default=False,
help="enable Nipype's resource monitoring to keep track of memory and CPU usage",
)
g_other.add_argument(
"--reports-only",
action="store_true",
default=False,
help="only generate reports, don't run workflows. This will only rerun report "
"aggregation, not reportlet generation for specific nodes.",
)
g_other.add_argument(
"--config-file",
action="store",
metavar="FILE",
help="Use pre-generated configuration file. Values in file will be overridden "
"by command-line arguments.")
g_other.add_argument(
"--write-graph",
action="store_true",
default=False,
help="Write workflow graph.",
)
g_other.add_argument(
"--stop-on-first-crash",
action="store_true",
default=False,
help="Force stopping on first crash, even if a work directory"
" was specified.",
)
g_other.add_argument(
"--notrack",
action="store_true",
default=False,
help="Opt-out of sending tracking information of this run to "
"the FMRIPREP developers. This information helps to "
"improve FMRIPREP and provides an indicator of real "
"world usage crucial for obtaining funding.",
)
g_other.add_argument(
"--debug",
action="store",
nargs="+",
choices=config.DEBUG_MODES + ("all",),
help="Debug mode(s) to enable. 'all' is alias for all available modes.",
)
g_other.add_argument(
"--sloppy",
action="store_true",
default=False,
help="Use low-quality tools for speed - TESTING ONLY",
)
latest = check_latest()
if latest is not None and currentv < latest:
print(
"""\
You are using fMRIPrep-%s, and a newer version of fMRIPrep is available: %s.
Please check out our documentation about how and when to upgrade:
https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading"""
% (currentv, latest),
file=sys.stderr,
)
_blist = is_flagged()
if _blist[0]:
_reason = _blist[1] or "unknown"
print(
"""\
WARNING: Version %s of fMRIPrep (current) has been FLAGGED
(reason: %s).
That means some severe flaw was found in it and we strongly
discourage its usage."""
% (config.environment.version, _reason),
file=sys.stderr,
)
return parser
def parse_args(args=None, namespace=None):
"""Parse args and run further checks on the command line."""
import logging
from niworkflows.utils.spaces import Reference, SpatialReferences
parser = _build_parser()
opts = parser.parse_args(args, namespace)
if opts.config_file:
skip = {} if opts.reports_only else {"execution": ("run_uuid",)}
config.load(opts.config_file, skip=skip)
config.loggers.cli.info(f"Loaded previous configuration file {opts.config_file}")
config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
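    # e.g. no -v -> 25, -v -> 20 (INFO), -vv -> 15, -vvv -> 10 (DEBUG); never below DEBUG.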
config.from_dict(vars(opts))
# Initialize --output-spaces if not defined
if config.execution.output_spaces is None:
config.execution.output_spaces = SpatialReferences(
[Reference("MNI152NLin2009cAsym", {"res": "native"})]
)
# Retrieve logging level
build_log = config.loggers.cli
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
import yaml
with open(opts.use_plugin) as f:
plugin_settings = yaml.load(f, Loader=yaml.FullLoader)
_plugin = plugin_settings.get("plugin")
if _plugin:
config.nipype.plugin = _plugin
config.nipype.plugin_args = plugin_settings.get("plugin_args", {})
config.nipype.nprocs = opts.nprocs or config.nipype.plugin_args.get(
"n_procs", config.nipype.nprocs
)
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
if 1 < config.nipype.nprocs < config.nipype.omp_nthreads:
build_log.warning(
f"Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed "
f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})"
)
# Inform the user about the risk of using brain-extracted images
if config.workflow.skull_strip_t1w == "auto":
build_log.warning(
"""\
Option ``--skull-strip-t1w`` was set to 'auto'. A heuristic will be \
applied to determine whether the input T1w image(s) have already been skull-stripped.
If that is the case, brain extraction and INU correction will be skipped for those T1w \
inputs. Please, BEWARE OF THE RISKS TO THE CONSISTENCY of results when using varying \
processing workflows across participants. To determine whether a participant has been run \
through the shortcut pipeline (meaning, brain extraction was skipped), please check the \
citation boilerplate. When reporting results with varying pipelines, please make sure you \
mention this particular variant of fMRIPrep listing the participants for which it was \
applied."""
)
bids_dir = config.execution.bids_dir
output_dir = config.execution.output_dir
work_dir = config.execution.work_dir
version = config.environment.version
if config.execution.fs_subjects_dir is None:
config.execution.fs_subjects_dir = output_dir / "freesurfer"
# Wipe out existing work_dir
if opts.clean_workdir and work_dir.exists():
from niworkflows.utils.misc import clean_directory
build_log.info(f"Clearing previous fMRIPrep working directory: {work_dir}")
if not clean_directory(work_dir):
build_log.warning(
f"Could not clear all contents of working directory: {work_dir}"
)
# Ensure input and output folders are not the same
if output_dir == bids_dir:
parser.error(
"The selected output folder is the same as the input BIDS folder. "
"Please modify the output path (suggestion: %s)."
            % (bids_dir / "derivatives" / ("fmriprep-%s" % version.split("+")[0]))
)
if bids_dir in work_dir.parents:
parser.error(
"The selected working directory is a subdirectory of the input BIDS folder. "
"Please modify the output path."
)
# Validate inputs
if not opts.skip_bids_validation:
from ..utils.bids import validate_input_dir
build_log.info(
"Making sure the input data is BIDS compliant (warnings can be ignored in most "
"cases)."
)
validate_input_dir(
config.environment.exec_env, opts.bids_dir, opts.participant_label
)
# Setup directories
config.execution.log_dir = output_dir / "fmriprep" / "logs"
# Check and create output and working directories
config.execution.log_dir.mkdir(exist_ok=True, parents=True)
output_dir.mkdir(exist_ok=True, parents=True)
work_dir.mkdir(exist_ok=True, parents=True)
# Force initialization of the BIDSLayout
config.execution.init()
all_subjects = config.execution.layout.get_subjects()
if config.execution.participant_label is None:
config.execution.participant_label = all_subjects
participant_label = set(config.execution.participant_label)
missing_subjects = participant_label - set(all_subjects)
if missing_subjects:
parser.error(
"One or more participant labels were not found in the BIDS directory: "
"%s." % ", ".join(missing_subjects)
)
config.execution.participant_label = sorted(participant_label)
config.workflow.skull_strip_template = config.workflow.skull_strip_template[0]
|
bsd-3-clause
|
eckucukoglu/arm-linux-gnueabihf
|
lib/python2.7/ctypes/test/test_wintypes.py
|
48
|
1473
|
import sys
import unittest
if not sys.platform.startswith('win'):
raise unittest.SkipTest('Windows-only test')
from ctypes import *
from ctypes import wintypes
class WinTypesTest(unittest.TestCase):
def test_variant_bool(self):
# reads 16-bits from memory, anything non-zero is True
for true_value in (1, 32767, 32768, 65535, 65537):
true = POINTER(c_int16)(c_int16(true_value))
value = cast(true, POINTER(wintypes.VARIANT_BOOL))
self.assertEqual(repr(value.contents), 'VARIANT_BOOL(True)')
vb = wintypes.VARIANT_BOOL()
self.assertIs(vb.value, False)
vb.value = True
self.assertIs(vb.value, True)
vb.value = true_value
self.assertIs(vb.value, True)
for false_value in (0, 65536, 262144, 2**33):
false = POINTER(c_int16)(c_int16(false_value))
value = cast(false, POINTER(wintypes.VARIANT_BOOL))
self.assertEqual(repr(value.contents), 'VARIANT_BOOL(False)')
# allow any bool conversion on assignment to value
for set_value in (65536, 262144, 2**33):
vb = wintypes.VARIANT_BOOL()
vb.value = set_value
self.assertIs(vb.value, True)
vb = wintypes.VARIANT_BOOL()
vb.value = [2, 3]
self.assertIs(vb.value, True)
vb.value = []
self.assertIs(vb.value, False)
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
rooi/CouchPotatoServer
|
libs/tornado/locale.py
|
160
|
21946
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = tornado.locale.get("es_LA")
print user_locale.translate("Sign out")
`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
print message % {"list": user_locale.list(people)}
The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.
Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""
from __future__ import absolute_import, division, print_function, with_statement
import csv
import datetime
import numbers
import os
import re
from tornado import escape
from tornado.log import gen_log
from tornado.util import u
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
def get(*locale_codes):
"""Returns the closest match for the given locale codes.
We iterate over all given locale codes in order. If we have a tight
or a loose match for the code (e.g., "en" for "en_US"), we return
the locale. Otherwise we move to the next code in the list.
By default we return ``en_US`` if no translations are found for any of
the specified locales. You can change the default locale with
`set_default_locale()`.
"""
return Locale.get_closest(*locale_codes)
def set_default_locale(code):
"""Sets the default locale.
The default locale is assumed to be the language used for all strings
in the system. The translations loaded from disk are mappings from
the default locale to the destination locale. Consequently, you don't
need to create a translation file for the default locale.
"""
global _default_locale
global _supported_locales
_default_locale = code
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory):
"""Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders
(e.g., ``My name is %(name)s``) and their associated translations.
The directory should have translation files of the form ``LOCALE.csv``,
e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
translation, and an optional plural indicator. Plural indicators should
be one of "plural" or "singular". A given string can have both singular
and plural forms. For example ``%(name)s liked this`` may have a
different verb conjugation depending on whether %(name)s is one
name or a list of names. There should be two rows in the CSV file for
that string, one with plural indicator "singular", and one "plural".
For strings with no verbs that would change on translation, simply
use "unknown" or the empty string (or don't include the column at all).
The file is read using the `csv` module in the default "excel" dialect.
In this format there should not be spaces after the commas.
Example translation ``es_LA.csv``::
"I love you","Te amo"
"%(name)s liked this","A %(name)s les gustó esto","plural"
"%(name)s liked this","A %(name)s le gustó esto","singular"
"""
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
if not path.endswith(".csv"):
continue
locale, extension = path.split(".")
if not re.match("[a-z]+(_[A-Z]+)?$", locale):
gen_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
full_path = os.path.join(directory, path)
try:
# python 3: csv.reader requires a file open in text mode.
# Force utf8 to avoid dependence on $LANG environment variable.
f = open(full_path, "r", encoding="utf-8")
except TypeError:
# python 2: files return byte strings, which are decoded below.
f = open(full_path, "r")
_translations[locale] = {}
for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2:
continue
row = [escape.to_unicode(c).strip() for c in row]
english, translation = row[:2]
if len(row) > 2:
plural = row[2] or "unknown"
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
gen_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
"""Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
    Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
"""
import gettext
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith('.'):
continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)):
continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception as e:
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
"""Returns a list of all the supported locale codes."""
return _supported_locales
class Locale(object):
"""Object representing a locale.
After calling one of `load_translations` or `load_gettext_translations`,
call `get` or `get_closest` to get a Locale object.
"""
@classmethod
def get_closest(cls, *locale_codes):
"""Returns the closest match for the given locale code."""
for code in locale_codes:
if not code:
continue
code = code.replace("-", "_")
parts = code.split("_")
if len(parts) > 2:
continue
elif len(parts) == 2:
code = parts[0].lower() + "_" + parts[1].upper()
if code in _supported_locales:
return cls.get(code)
if parts[0].lower() in _supported_locales:
return cls.get(parts[0].lower())
return cls.get(_default_locale)
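    # For example, get_closest("en-gb", "fr_FR") normalizes "en-gb" to "en_GB" and
    # returns it if loaded, falls back to a bare "en" locale if that is loaded,
    # then tries "fr_FR" the same way, and finally returns the default locale.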
@classmethod
def get(cls, code):
"""Returns the Locale for the given locale code.
If it is not supported, we raise an exception.
"""
if not hasattr(cls, "_cache"):
cls._cache = {}
if code not in cls._cache:
assert code in _supported_locales
translations = _translations.get(code, None)
if translations is None:
locale = CSVLocale(code, {})
elif _use_gettext:
locale = GettextLocale(code, translations)
else:
locale = CSVLocale(code, translations)
cls._cache[code] = locale
return cls._cache[code]
def __init__(self, code, translations):
self.code = code
self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
self.rtl = True
break
self.translations = translations
# Initialize strings for date formatting
_ = self.translate
self._months = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
self._weekdays = [
_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
_("Friday"), _("Saturday"), _("Sunday")]
def translate(self, message, plural_message=None, count=None):
"""Returns the translation for the given message for this locale.
If ``plural_message`` is given, you must also provide
``count``. We return ``plural_message`` when ``count != 1``,
and we return the singular form for the given message when
``count == 1``.
"""
raise NotImplementedError()
def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
"""
if isinstance(date, numbers.Real):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
                # Due to clock skew, some timestamps may appear slightly
                # in the future. Round timestamps in the immediate
# future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
_ = self.translate
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return _("1 second ago", "%(seconds)d seconds ago",
seconds) % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return _("1 minute ago", "%(minutes)d minutes ago",
minutes) % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return _("1 hour ago", "%(hours)d hours ago",
hours) % {"hours": hours}
if days == 0:
format = _("%(time)s")
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = _("yesterday") if shorter else \
_("yesterday at %(time)s")
elif days < 5:
format = _("%(weekday)s") if shorter else \
_("%(weekday)s at %(time)s")
elif days < 334: # 11mo, since confusing for same month last year
format = _("%(month_name)s %(day)s") if shorter else \
_("%(month_name)s %(day)s at %(time)s")
if format is None:
format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
_("%(month_name)s %(day)s, %(year)s at %(time)s")
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
if tfhour_clock:
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
(u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
local_date.hour % 12 or 12, local_date.minute,
("am", "pm")[local_date.hour >= 12])
return format % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
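    # Rough illustration: a GMT timestamp 120 seconds in the past renders as
    # "2 minutes ago" in relative mode, while future dates and dates older than
    # roughly 11 months fall back to the full "Month day, year at time" format.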
def format_day(self, date, gmt_offset=0, dow=True):
"""Formats the given date as a day of week.
Example: "Monday, January 22". You can remove the day of week with
``dow=False``.
"""
local_date = date - datetime.timedelta(minutes=gmt_offset)
_ = self.translate
if dow:
return _("%(weekday)s, %(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
}
else:
return _("%(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"day": str(local_date.day),
}
def list(self, parts):
"""Returns a comma-separated list for the given list of parts.
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
of size 1.
"""
_ = self.translate
if len(parts) == 0:
return ""
if len(parts) == 1:
return parts[0]
comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
}
def friendly_number(self, value):
"""Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"):
return str(value)
value = str(value)
parts = []
while value:
parts.append(value[-3:])
value = value[:-3]
return ",".join(reversed(parts))
class CSVLocale(Locale):
"""Locale implementation using tornado's CSV translation format."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
class GettextLocale(Locale):
"""Locale implementation using the `gettext` module."""
def __init__(self, code, translations):
try:
# python 2
self.ngettext = translations.ungettext
self.gettext = translations.ugettext
except AttributeError:
# python 3
self.ngettext = translations.ngettext
self.gettext = translations.gettext
# self.gettext must exist before __init__ is called, since it
# calls into self.translate
super(GettextLocale, self).__init__(code, translations)
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
return self.ngettext(message, plural_message, count)
else:
return self.gettext(message)
LOCALE_NAMES = {
"af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
"am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')},
"ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")},
"bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")},
"bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")},
"bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
"ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")},
"cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")},
"cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
"da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
"de_DE": {"name_en": u("German"), "name": u("Deutsch")},
"el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")},
"en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
"en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
"es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")},
"es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")},
"et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
"eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
"fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")},
"fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
"fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")},
"fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")},
"ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
"gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
"he_IL": {"name_en": u("Hebrew"), "name": u("\u05e2\u05d1\u05e8\u05d9\u05ea")},
"hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")},
"hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
"hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
"id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
"is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")},
"it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
"ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")},
"ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")},
"lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")},
"lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")},
"mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")},
"ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")},
"ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
"nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")},
"nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
"nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
"pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")},
"pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
"pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")},
"pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")},
"ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")},
"ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")},
"sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")},
"sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")},
"sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
"sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")},
"sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
"sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
"ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")},
"te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")},
"th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")},
"tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
"tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")},
"uk_UA": {"name_en": u("Ukraini "), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")},
"vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")},
"zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")},
"zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")},
}
|
gpl-3.0
|
slipstream/SlipStreamClient
|
client/src/main/python/slipstream/wrappers/BaseWrapper.py
|
1
|
25777
|
"""
SlipStream Client
=====
Copyright (C) 2014 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import traceback
from slipstream import util
from slipstream.SlipStreamHttpClient import SlipStreamHttpClient
from slipstream.NodeDecorator import NodeDecorator
from slipstream.exceptions import Exceptions
from slipstream.Client import Client
from slipstream.NodeInstance import NodeInstance
from slipstream.exceptions.Exceptions import TimeoutException, \
ExecutionException, InconsistentScaleStateError, InconsistentScalingNodesError
class RuntimeParameter(object):
def __init__(self, config_holder):
self._ss = SlipStreamHttpClient(config_holder)
self._ss.set_retry(True)
def set(self, parameter, value, ignore_abort=True):
self._ss.setRuntimeParameter(parameter, value, ignoreAbort=ignore_abort)
def set_from_parts(self, category, key, value, ignore_abort=True):
parameter = category + NodeDecorator.NODE_PROPERTY_SEPARATOR + key
self.set(parameter, value, ignore_abort)
class NodeInfoPublisher(SlipStreamHttpClient):
def __init__(self, config_holder):
super(NodeInfoPublisher, self).__init__(config_holder)
self.set_retry(True)
def publish(self, nodename, vm_id, vm_ip):
self.publish_instanceid(nodename, vm_id)
self.publish_hostname(nodename, vm_ip)
def publish_instanceid(self, nodename, vm_id):
self._set_runtime_parameter(nodename, 'instanceid', vm_id)
def publish_hostname(self, nodename, vm_ip):
self._set_runtime_parameter(nodename, 'hostname', vm_ip)
def publish_url_ssh(self, nodename, vm_ip, username):
url = 'ssh://%s@%s' % (username.strip(), vm_ip.strip())
self._set_runtime_parameter(nodename, 'url.ssh', url)
def _set_runtime_parameter(self, nodename, key, value):
parameter = nodename + NodeDecorator.NODE_PROPERTY_SEPARATOR + key
self.setRuntimeParameter(parameter, value, ignoreAbort=True)
SS_POLLING_INTERVAL_SEC = 10
class BaseWrapper(object):
SCALE_ACTION_CREATION = 'vm_create'
SCALE_STATE_CREATING = 'creating'
SCALE_STATE_CREATED = 'created'
SCALE_ACTION_REMOVAL = 'vm_remove'
SCALE_STATE_REMOVING = 'removing'
SCALE_STATE_REMOVED = 'removed'
SCALE_STATE_GONE = 'gone'
SCALE_ACTION_RESIZE = 'vm_resize'
SCALE_STATE_RESIZING = 'resizing'
SCALE_STATE_RESIZED = 'resized'
SCALE_ACTION_DISK_ATTACH = 'disk_attach'
SCALE_STATE_DISK_ATTACHING = 'disk_attaching'
SCALE_STATE_DISK_ATTACHED = 'disk_attached'
SCALE_ACTION_DISK_DETACH = 'disk_detach'
SCALE_STATE_DISK_DETACHING = 'disk_detaching'
SCALE_STATE_DISK_DETACHED = 'disk_detached'
SCALE_STATES_START_STOP_MAP = {
SCALE_STATE_CREATING: SCALE_STATE_CREATED,
SCALE_STATE_REMOVING: SCALE_STATE_REMOVED,
SCALE_STATE_RESIZING: SCALE_STATE_RESIZED,
SCALE_STATE_DISK_ATTACHING: SCALE_STATE_DISK_ATTACHED,
SCALE_STATE_DISK_DETACHING: SCALE_STATE_DISK_DETACHED}
SCALE_STATE_OPERATIONAL = 'operational'
SCALE_STATES_TERMINAL = (SCALE_STATE_OPERATIONAL, SCALE_STATE_GONE)
SCALE_STATES_VERTICAL_SCALABILITY = (SCALE_STATE_RESIZING,
SCALE_STATE_DISK_ATTACHING,
SCALE_STATE_DISK_DETACHING)
STATE_TO_ACTION = {
SCALE_STATE_CREATING: SCALE_ACTION_CREATION,
SCALE_STATE_CREATED: SCALE_ACTION_CREATION,
SCALE_STATE_REMOVING: SCALE_ACTION_REMOVAL,
SCALE_STATE_REMOVED: SCALE_ACTION_REMOVAL,
SCALE_STATE_RESIZING: SCALE_ACTION_RESIZE,
SCALE_STATE_RESIZED: SCALE_ACTION_RESIZE,
SCALE_STATE_DISK_ATTACHING: SCALE_ACTION_DISK_ATTACH,
SCALE_STATE_DISK_ATTACHED: SCALE_ACTION_DISK_ATTACH,
SCALE_STATE_DISK_DETACHING: SCALE_ACTION_DISK_DETACH,
SCALE_STATE_DISK_DETACHED: SCALE_ACTION_DISK_DETACH,
}
def __init__(self, config_holder):
self._ss_client = SlipStreamHttpClient(config_holder)
self._ss_client.set_retry(True)
self._ss_client.ignoreAbort = True
self.my_node_instance_name = self._get_my_node_instance_name(config_holder)
self._config_holder = config_holder
self._user_info = None
self._run_parameters = None
self._nodes_instances = {}
self._state_start_time = None
@staticmethod
def _get_my_node_instance_name(config_holder):
try:
return config_holder.node_instance_name
except Exception:
            raise Exceptions.ExecutionException('Failed to get the node instance name of the current VM')
def get_my_node_instance_name(self):
return self.my_node_instance_name
def get_slipstream_client(self):
return self._ss_client
def complete_state(self, node_instance_name=None):
if not node_instance_name:
node_instance_name = self.get_my_node_instance_name()
self._ss_client.complete_state(node_instance_name)
def fail(self, message):
key = self._qualifyKey(NodeDecorator.ABORT_KEY)
self._fail(key, message)
def fail_global(self, message):
key = NodeDecorator.globalNamespacePrefix + NodeDecorator.ABORT_KEY
self._fail(key, message)
def _fail(self, key, message):
util.printError('Failing... %s' % message)
traceback.print_exc()
value = util.truncate_middle(Client.VALUE_LENGTH_LIMIT, message, '\n(truncated)\n')
self._ss_client.setRuntimeParameter(key, value)
def getState(self):
key = NodeDecorator.globalNamespacePrefix + NodeDecorator.STATE_KEY
return self._get_runtime_parameter(key)
def get_recovery_mode(self):
key = NodeDecorator.globalNamespacePrefix + NodeDecorator.RECOVERY_MODE_KEY
return util.str2bool(self._get_runtime_parameter(key))
def isAbort(self):
key = NodeDecorator.globalNamespacePrefix + NodeDecorator.ABORT_KEY
try:
value = self._get_runtime_parameter(key, True)
except Exceptions.NotYetSetException:
value = ''
return (value and True) or False
def get_max_iaas_workers(self):
"""Available only on orchestrator.
"""
return self._get_runtime_parameter(self._qualifyKey("max.iaas.workers"))
def get_run_category(self):
return self._ss_client.get_run_category()
def get_run_type(self):
return self._ss_client.get_run_type()
def _qualifyKey(self, key):
"""Qualify the key, if not already done, with the right nodename"""
_key = key
# Is this a reserved or special nodename?
for reserved in NodeDecorator.reservedNodeNames:
if _key.startswith(reserved + NodeDecorator.NODE_PROPERTY_SEPARATOR):
return _key
# Is the key namespaced (i.e. contains node/key separator: ':')?
if NodeDecorator.NODE_PROPERTY_SEPARATOR in _key:
# Is the nodename in the form: <nodename>.<index>? If not, make it so
            # such that <nodename>:<property> -> <nodename>.1:<property>
parts = _key.split(NodeDecorator.NODE_PROPERTY_SEPARATOR)
nodenamePart = parts[0]
propertyPart = parts[1] # safe since we've done the test in the if above
parts = nodenamePart.split(NodeDecorator.NODE_MULTIPLICITY_SEPARATOR)
nodename = parts[0]
if len(parts) == 1:
_key = nodename + \
NodeDecorator.NODE_MULTIPLICITY_SEPARATOR + \
NodeDecorator.nodeMultiplicityStartIndex + \
NodeDecorator.NODE_PROPERTY_SEPARATOR + \
propertyPart
return _key
_key = self.get_my_node_instance_name() + NodeDecorator.NODE_PROPERTY_SEPARATOR + _key
return _key
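    # Illustration (names hypothetical, assuming ':' and '.' are the property and
    # multiplicity separators): a bare key 'abort' becomes '<my-node-instance>:abort',
    # 'webserver:ready' becomes 'webserver.1:ready', and an already qualified
    # 'webserver.2:ready' is returned unchanged.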
def get_cloud_instance_id(self):
key = self._qualifyKey(NodeDecorator.INSTANCEID_KEY)
return self._get_runtime_parameter(key)
def get_user_ssh_pubkey(self):
userInfo = self._get_user_info('')
return userInfo.get_public_keys()
def get_pre_scale_done(self, node_instance_or_name=None):
"""Get pre.scale.done RTP for the current node instance or for the requested one
(by NodeInstance object or node instance name).
:param node_instance_or_name: node instance or node instance name
:type node_instance_or_name: NodeInstance or str
:return:
"""
if node_instance_or_name:
key = self._build_rtp(node_instance_or_name, NodeDecorator.PRE_SCALE_DONE)
else:
key = self._qualifyKey(NodeDecorator.PRE_SCALE_DONE)
return self._get_runtime_parameter(key)
def is_pre_scale_done(self, node_instance_or_name=None):
"""Checks if pre-scale action is done on itself (node_instance_or_name is not provided)
or on a requested node instance (by NodeInstance object or node instance name).
:param node_instance_or_name: node instance or node instance name
:type node_instance_or_name: NodeInstance or str
:return: True or False
:rtype: ``bool``
"""
value = self.get_pre_scale_done(node_instance_or_name)
return NodeDecorator.PRE_SCALE_DONE_SUCCESS == value
@staticmethod
def _build_rtp(category, key):
if isinstance(category, NodeInstance):
category = category.get_name()
return category + NodeDecorator.NODE_PROPERTY_SEPARATOR + key
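    # e.g. _build_rtp("webserver.1", NodeDecorator.PRE_SCALE_DONE) yields something
    # like "webserver.1:pre.scale.done", assuming ':' is the node/property separator.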
def _get_runtime_parameter(self, key, ignore_abort=False):
return self._ss_client.getRuntimeParameter(key, ignoreAbort=ignore_abort)
def need_to_stop_images(self, ignore_on_success_run_forever=False):
# pylint: disable=unused-argument
return False
def is_mutable(self):
mutable = self._ss_client.get_run_mutable()
return util.str2bool(mutable)
def set_scale_state_on_node_instances(self, instance_names, scale_state):
for instance_name in instance_names:
key = instance_name + NodeDecorator.NODE_PROPERTY_SEPARATOR + NodeDecorator.SCALE_STATE_KEY
self._ss_client.setRuntimeParameter(key, scale_state)
def is_scale_state_operational(self):
return self.SCALE_STATE_OPERATIONAL == self.get_scale_state()
def is_scale_state_creating(self):
return self.SCALE_STATE_CREATING == self.get_scale_state()
def _is_scale_state_terminal(self, state):
return state in self.SCALE_STATES_TERMINAL
def set_scale_state_operational(self):
self.set_scale_state(self.SCALE_STATE_OPERATIONAL)
def set_scale_state_created(self):
self.set_scale_state(self.SCALE_STATE_CREATED)
def set_scale_state(self, scale_state):
"""Set scale state for this node instances.
"""
key = self._qualifyKey(NodeDecorator.SCALE_STATE_KEY)
self._ss_client.setRuntimeParameter(key, scale_state)
def get_scale_state(self):
"""Get scale state for this node instances.
"""
key = self._qualifyKey(NodeDecorator.SCALE_STATE_KEY)
return self._get_runtime_parameter(key)
def _get_effective_scale_state(self, node_instance_or_name):
"""Get effective node instance scale state from the server.
"""
if isinstance(node_instance_or_name, NodeInstance):
node_instance_or_name = node_instance_or_name.get_name()
key = node_instance_or_name + NodeDecorator.NODE_PROPERTY_SEPARATOR + \
NodeDecorator.SCALE_STATE_KEY
return self._get_runtime_parameter(key)
def _get_effective_scale_states(self):
"""Extract node instances in scaling states and update their effective
states from the server.
Return {scale_state: [node_instance_name, ], }
"""
states_instances = {}
for node_instance_name, node_instance in self._get_nodes_instances().iteritems():
state = node_instance.get_scale_state()
if not self._is_scale_state_terminal(state):
state = self._get_effective_scale_state(node_instance_name)
states_instances.setdefault(state, []).append(node_instance_name)
return states_instances
def _get_global_scale_state(self):
"""Return scale state all the node instances are in, or None.
Raise InconsistentScaleStateError if there are instances in different states.
For consistency reasons, only single scalability action is allowed.
"""
states_node_instances = self._get_effective_scale_states()
if len(states_node_instances) == 0:
return None
if len(states_node_instances) == 1:
return states_node_instances.keys()[0]
msg = "Inconsistent scaling situation. Single scaling action allowed," \
" found: %s" % states_node_instances
raise InconsistentScaleStateError(msg)
def get_global_scale_action(self):
state = self._get_global_scale_state()
return self._state_to_action(state)
def get_scale_action(self):
state = self.get_scale_state()
return self._state_to_action(state)
def check_scale_state_consistency(self):
states_node_instances = self._get_effective_scale_states()
states_node_instances.pop(self.SCALE_STATE_REMOVED, None)
if len(states_node_instances) > 1:
msg = "Inconsistent scaling situation. Single scaling action allowed," \
" found: %s" % states_node_instances
raise InconsistentScaleStateError(msg)
def get_scaling_node_and_instance_names(self):
"""Return name of the node and the corresponding instances that are
currently being scaled.
:return: two-tuple with scaling node name and a list of node instance names.
:rtype: (node_name, [node_instance_name, ])
        :raises: InconsistentScalingNodesError if more than one node type is being scaled.
"""
node_names = set()
node_instance_names = []
for node_instance_name, node_instance in self._get_nodes_instances().iteritems():
state = node_instance.get_scale_state()
if not self._is_scale_state_terminal(state):
node_names.add(node_instance.get_node_name())
node_instance_names.append(node_instance_name)
if len(node_names) > 1:
msg = "Inconsistent scaling situation. Scaling of only single" \
" node type is allowed, found: %s" % ', '.join(node_names)
raise InconsistentScalingNodesError(msg)
if not node_names:
return '', node_instance_names
else:
return node_names.pop(), node_instance_names
def _state_to_action(self, state):
return self.STATE_TO_ACTION.get(state, None)
def get_node_instances_in_scale_state(self, scale_state):
"""Return dict {<node_instance_name>: NodeInstance, } with the node instances
in the scale_state.
"""
instances = {}
nodes_instances = self._get_nodes_instances()
for instance_name, instance in nodes_instances.iteritems():
if instance.get_scale_state() == scale_state:
instances[instance_name] = instance
return instances
def send_report(self, filename):
self._ss_client.sendReport(filename)
def set_statecustom(self, message):
key = self._qualifyKey(NodeDecorator.STATECUSTOM_KEY)
self._ss_client.setRuntimeParameter(key, message)
def set_pre_scale_done(self):
"""To be called by NodeDeploymentExecutor. Not thread-safe.
"""
key = self._qualifyKey(NodeDecorator.PRE_SCALE_DONE)
self._ss_client.setRuntimeParameter(key, NodeDecorator.PRE_SCALE_DONE_SUCCESS)
def unset_pre_scale_done(self):
"""To be called by NodeDeploymentExecutor. Not thread-safe.
"""
key = self._qualifyKey(NodeDecorator.PRE_SCALE_DONE)
self._ss_client.setRuntimeParameter(key, 'false')
def set_scale_action_done(self):
"""To be called by NodeDeploymentExecutor. Sets an end of the scaling action.
Not thread-safe.
"""
scale_state_start = self.get_scale_state()
key = self._qualifyKey(NodeDecorator.SCALE_STATE_KEY)
try:
scale_done = self.SCALE_STATES_START_STOP_MAP[scale_state_start]
except KeyError:
raise ExecutionException(
"Unable to set the end of scale action on %s. Don't know start->done mapping for %s." %
(key, scale_state_start))
else:
self._ss_client.setRuntimeParameter(key, scale_done)
def set_scale_iaas_done(self, node_instance_or_name):
"""To be called on Orchestrator. Thread-safe implementation.
:param node_instance_or_name: node instance object or node instance name
:type node_instance_or_name: NodeInstance or str
"""
self._set_scale_iaas_done_rtp(node_instance_or_name, NodeDecorator.SCALE_IAAS_DONE_SUCCESS)
def set_scale_iaas_done_and_set_attached_disk(self, node_instance_or_name, disk):
"""To be called on Orchestrator. Thread-safe implementation.
:param node_instance_or_name: node instance object or node instance name
:type node_instance_or_name: NodeInstance or str
:param disk: identifier of the attached disk
:type disk: str
"""
self.set_scale_iaas_done(node_instance_or_name)
self.set_attached_disk(node_instance_or_name, disk)
def unset_scale_iaas_done(self, node_instance_or_name):
"""To be called on Orchestrator. Thread-safe implementation.
:param node_instance_or_name: node instance object or node instance name
:type node_instance_or_name: NodeInstance or str
"""
self._set_scale_iaas_done_rtp(node_instance_or_name, 'false')
def _set_scale_iaas_done_rtp(self, node_instance_or_name, value):
"""To be called on Orchestrator. Thread-safe implementation.
:param node_instance_or_name: node instance object or node instance name
:type node_instance_or_name: NodeInstance or str
"""
self._set_rtp(node_instance_or_name, NodeDecorator.SCALE_IAAS_DONE, value)
def set_attached_disk(self, node_instance_or_name, disk):
self._set_attached_disk_rtp(node_instance_or_name, disk)
def _set_attached_disk_rtp(self, node_instance_or_name, value):
self._set_rtp(node_instance_or_name, NodeDecorator.SCALE_DISK_ATTACHED_DEVICE, value)
def _set_rtp(self, node_instance_or_name, key, value):
if isinstance(node_instance_or_name, NodeInstance):
node_instance_or_name = node_instance_or_name.get_name()
rtp = node_instance_or_name + NodeDecorator.NODE_PROPERTY_SEPARATOR + key
self._set_runtime_parameter(rtp, value)
def is_vertical_scaling(self):
return self._get_global_scale_state() in self.SCALE_STATES_VERTICAL_SCALABILITY
def is_vertical_scaling_vm(self):
return self.get_scale_state() in self.SCALE_STATES_VERTICAL_SCALABILITY
def is_horizontal_scale_down(self):
return self._get_global_scale_state() == self.SCALE_STATE_REMOVING
def is_horizontal_scale_down_vm(self):
return self.get_scale_state() == self.SCALE_STATE_REMOVING
def _set_runtime_parameter(self, parameter, value):
# Needed for thread safety.
RuntimeParameter(self._get_config_holder_deepcopy()).set(parameter, value)
def _get_config_holder(self):
return self._config_holder
def _get_config_holder_deepcopy(self):
return self._get_config_holder().deepcopy()
@staticmethod
def _wait_rtp_equals(node_instances, expected_value, rtp_getter, timeout_at,
polling_interval=None):
"""Blocking wait with timeout until the RTP is set to the expected value
on the set of node instances.
NB! RTP name is NOT known. A getter function is used to get the value.
:param node_instances: list of node instances
:type node_instances: list [NodeInstance, ]
:param expected_value: value to which the rtp should be set to
:type expected_value: str
:param rtp_getter: function to get the rtp; should accept NodeInstance or node instance name
:type rtp_getter: callable
:param timeout_at: wall-clock time to timeout at
:type timeout_at: int or float
"""
node_instance_to_result = dict([(ni.get_name(), False) for ni in node_instances])
polling_interval = polling_interval or SS_POLLING_INTERVAL_SEC
_polling_interval = 0
while not all(node_instance_to_result.values()):
if (timeout_at > 0) and (time.time() >= timeout_at):
raise TimeoutException("Timed out while waiting for RTP to be set to '%s' on %s." %
(expected_value, node_instance_to_result))
time.sleep(_polling_interval)
for node_instance in node_instances:
node_instance_name = node_instance.get_name()
if not node_instance_to_result[node_instance_name]:
if expected_value == rtp_getter(node_instance):
node_instance_to_result[node_instance_name] = expected_value
_polling_interval = polling_interval
def wait_scale_iaas_done(self):
"""To be called by NodeDeployentExecutor on the node instance.
Blocking wait (with timeout) until RTP scale.iaas.done is set by Orchestrator.
:raises: TimeoutException
"""
timeout_at = 0 # no timeout
self._log('Waiting for Orchestrator to finish scaling this node instance (no timeout).')
node_instances = [self.get_my_node_instance()]
self._wait_rtp_equals(node_instances, NodeDecorator.SCALE_IAAS_DONE_SUCCESS,
self.get_scale_iaas_done, timeout_at)
        self._log('Orchestrator finished scaling this node instance.')
def get_scale_iaas_done(self, node_instance_or_name):
parameter = self._build_rtp(node_instance_or_name, NodeDecorator.SCALE_IAAS_DONE)
return self._get_runtime_parameter(parameter)
def _get_cloud_service_name(self):
return os.environ[util.ENV_CONNECTOR_INSTANCE]
def set_state_start_time(self):
self._state_start_time = time.time()
def get_state_start_time(self):
        return time.time() if self._state_start_time is None else self._state_start_time
#
# Local cache of NodesInstances, Run, Run Parameters and User.
#
def discard_nodes_info_locally(self):
self._nodes_instances = {}
def _get_nodes_instances(self):
"""Return dict {<node_instance_name>: NodeInstance, }
"""
node_instances = self._get_nodes_instances_with_orchestrators()
return dict([(k, ni) for k, ni in node_instances.iteritems() if not ni.is_orchestrator()])
def _get_nodes_instances_with_orchestrators(self):
"""Return dict {<node_instance_name>: NodeInstance, }
"""
if not self._nodes_instances:
self._nodes_instances = self._ss_client.get_nodes_instances(self._get_cloud_service_name())
return self._nodes_instances
def get_my_node_instance(self):
node_name = self.get_my_node_instance_name()
return self._get_nodes_instances_with_orchestrators().get(node_name)
def discard_run_locally(self):
self._ss_client.discard_run()
def discard_user_info_locally(self):
self._user_info = None
def _get_user_info(self, cloud_service_name):
if self._user_info is None:
self._user_info = self._ss_client.get_user_info(cloud_service_name)
return self._user_info
def discard_run_parameters_locally(self):
self._run_parameters = None
def _get_run_parameters(self):
if self._run_parameters is None:
self._run_parameters = self._ss_client.get_run_parameters()
return self._run_parameters
def has_to_execute_build_recipes(self):
run_parameters = self._get_run_parameters()
key = self.get_my_node_instance_name().rsplit(NodeDecorator.NODE_MULTIPLICITY_SEPARATOR, 1)[0] \
+ NodeDecorator.NODE_PROPERTY_SEPARATOR \
+ NodeDecorator.RUN_BUILD_RECIPES_KEY
return util.str2bool(run_parameters.get(key))
def clean_local_cache(self):
self.discard_run_locally()
self.discard_nodes_info_locally()
self.discard_run_parameters_locally()
#
# Helpers
#
def _terminate_run_server_side(self):
self._ss_client.terminate_run()
def _put_new_image_id(self, url, new_image_id):
self._ss_client.put_new_image_id(url, new_image_id)
def _log_and_set_statecustom(self, msg):
self._log(msg)
try:
self.set_statecustom(msg)
except Exception as ex:
self._log('Failed to set statecustom with: %s' % str(ex))
@staticmethod
def _log(msg):
util.printDetail(msg, verboseThreshold=0)
|
apache-2.0
|
futurice/vdsm
|
vdsm_hooks/checkimages/before_vm_start.py
|
5
|
4691
|
#!/usr/bin/python
import os
import sys
import traceback
import fcntl
import struct
import hooking
BLKGETSIZE64 = 0x80081272 # Obtain device size in bytes
FORMAT = 'L'
TIMEPERGIB = 0.02 # Approximate qemu-img check time (in seconds) to check 1GiB
GIB = 10 ** 9 # GiB
'''
checkimages vdsm hook
=====================
Hook performs consistency check on all qcow2 format disk images of a
particular VM using the QEMU disk image utility.
Accepts optional parameter 'timeout' (in seconds) to specify how long
the hook should wait for the QEMU disk image utility operation to complete.
Without 'timeout' specified, the timeout is computed based on the image size.
syntax:
checkimages=true(|,timeout:\d+\.{1}\d+);
example:
checkimages=true,timeout:1.12 # Use 1.12 seconds as timeout
checkimages=true # Compute timeout based on image size
Note: Timeout value is taken in seconds. Check of 1GB image takes ~0.02 s.
'''
def computeImageTimeout(disk_image, driver_type):
'''
    Compute the expected timeout value for an image. Use a value of 10 s as the
    default timeout for very small images (where a delay in launching the image
    check could cause the VM to fail to start). Use the precomputed value when
    the required timeout is bigger than 10 seconds.
'''
default_timeout = float(10)
image_size = getImageSize(disk_image, driver_type)
image_timeout = float(image_size * TIMEPERGIB)
if image_timeout > default_timeout:
return image_timeout
return default_timeout
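# Worked example: with TIMEPERGIB = 0.02 s/GiB and a 10 s floor, a 100 GiB image
# gives 100 * 0.02 = 2 s, so the 10 s default is used, while a 1000 GiB image
# gives 1000 * 0.02 = 20 s, so 20 s is used.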
def getImageSize(disk_image, driver_type):
'''
Obtain qcow2 image size in GiBs
'''
if driver_type == 'block':
dev_buffer = ' ' * 8
with open(disk_image) as device:
dev_buffer = fcntl.ioctl(device.fileno(), BLKGETSIZE64, dev_buffer)
image_bytes = struct.unpack(FORMAT, dev_buffer)[0]
elif driver_type == 'file':
image_bytes = os.stat(disk_image).st_size
    return float(image_bytes) / GIB
def checkImage(path, timeout):
'''
Check qcow2 image using qemu-img QEMU utility
'''
cmd = ['/usr/bin/qemu-img', 'check', '-f', 'qcow2', path]
# Check the image using qemu-img. Enforce check termination
# on timeout expiration
p = hooking.execCmd(cmd, raw=True, sync=False)
if not p.wait(timeout):
p.kill()
sys.stderr.write('checkimages: %s image check operation timed out.' %
path)
        sys.stderr.write('Increase timeout or check image availability.')
sys.exit(2)
((out, err), rc) = (p.communicate(), p.returncode)
if rc == 0:
sys.stderr.write('checkimages: %s image check returned: %s\n' %
(path, out))
else:
sys.stderr.write('checkimages: Error running %s command: %s\n' %
(' '.join(cmd), err))
sys.exit(2)
if 'checkimages' in os.environ:
requested_timeout = None
try:
env_value = os.environ['checkimages']
# checkimages=true,timeout:1.23 case => get requested timeout value
if ',' in env_value:
timeout = (env_value.split(',', 2)[1]).split(':', 2)[1]
requested_timeout = float(timeout)
domxml = hooking.read_domxml()
disks = domxml.getElementsByTagName('disk')
for disk in disks:
disk_device = disk.getAttribute('device')
if disk_device != 'disk':
continue
drivers = disk.getElementsByTagName('driver')
sources = disk.getElementsByTagName('source')
if not drivers or not sources:
continue
driver_type = drivers[0].getAttribute('type') # 'raw' or 'qcow2'
if driver_type != 'qcow2':
continue
disk_type = disk.getAttribute('type') # 'block' or 'file'
disk_image = None
if disk_type == 'block':
disk_image = sources[0].getAttribute('dev')
elif disk_type == 'file':
disk_image = sources[0].getAttribute('file')
if disk_image:
image_timeout = computeImageTimeout(disk_image, disk_type)
# Explicit timeout was requested, use it instead of the
# precomputed one
if requested_timeout is not None:
image_timeout = requested_timeout
sys.stderr.write('checkimages: Checking image %s. ' %
disk_image)
checkImage(disk_image, image_timeout)
except:
sys.stderr.write('checkimages [unexpected error]: %s\n' %
traceback.format_exc())
sys.exit(2)
|
gpl-2.0
|
ataylor32/django
|
tests/template_tests/test_engine.py
|
199
|
3971
|
import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
@ignore_warnings(category=RemovedInDjango110Warning)
class DeprecatedRenderToStringTest(SimpleTestCase):
def setUp(self):
self.engine = Engine(
dirs=[TEMPLATE_DIR],
libraries={'custom': 'template_tests.templatetags.custom'},
)
def test_basic_context(self):
self.assertEqual(
self.engine.render_to_string('test_context.html', {'obj': 'test'}),
'obj:test\n',
)
def test_existing_context_kept_clean(self):
context = Context({'obj': 'before'})
output = self.engine.render_to_string(
'test_context.html', {'obj': 'after'}, context_instance=context,
)
self.assertEqual(output, 'obj:after\n')
self.assertEqual(context['obj'], 'before')
def test_no_empty_dict_pushed_to_stack(self):
"""
#21741 -- An empty dict should not be pushed to the context stack when
render_to_string is called without a context argument.
"""
# The stack should have a length of 1, corresponding to the builtins
self.assertEqual(
'1',
self.engine.render_to_string('test_context_stack.html').strip(),
)
self.assertEqual(
'1',
self.engine.render_to_string(
'test_context_stack.html',
context_instance=Context()
).strip(),
)
class LoaderTests(SimpleTestCase):
def test_origin(self):
engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
template = engine.get_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
def test_loader_priority(self):
"""
        #21460 -- Check that the order of template loaders works.
"""
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
def test_cached_loader_priority(self):
"""
        Check that the order of template loaders works. Refs #21460.
"""
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
@ignore_warnings(category=RemovedInDjango110Warning)
class TemplateDirsOverrideTests(SimpleTestCase):
DIRS = ((OTHER_DIR, ), [OTHER_DIR])
def setUp(self):
self.engine = Engine()
def test_render_to_string(self):
for dirs in self.DIRS:
self.assertEqual(
self.engine.render_to_string('test_dirs.html', dirs=dirs),
'spam eggs\n',
)
def test_get_template(self):
for dirs in self.DIRS:
template = self.engine.get_template('test_dirs.html', dirs=dirs)
self.assertEqual(template.render(Context()), 'spam eggs\n')
def test_select_template(self):
for dirs in self.DIRS:
template = self.engine.select_template(['test_dirs.html'], dirs=dirs)
self.assertEqual(template.render(Context()), 'spam eggs\n')
|
bsd-3-clause
|
blrm/openshift-tools
|
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/src/test/unit/test_oc_group.py
|
17
|
7827
|
'''
Unit tests for oc group
'''
import copy
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_group import OCGroup, locate_oc_binary # noqa: E402
class OCGroupTest(unittest.TestCase):
'''
Test class for OCGroup
'''
params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'state': 'present',
'debug': False,
'name': 'acme',
'namespace': 'test'}
@mock.patch('oc_group.Utils.create_tmpfile_copy')
@mock.patch('oc_group.OCGroup._run')
def test_create_group(self, mock_run, mock_tmpfile_copy):
''' Testing a group create '''
params = copy.deepcopy(OCGroupTest.params)
group = '''{
"kind": "Group",
"apiVersion": "v1",
"metadata": {
"name": "acme"
},
"users": []
}'''
mock_run.side_effect = [
(1, '', 'Error from server: groups.user.openshift.io "acme" not found'),
(1, '', 'Error from server: groups.user.openshift.io "acme" not found'),
(0, '', ''),
(0, group, ''),
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCGroup.run_ansible(params, False)
self.assertTrue(results['changed'])
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'acme')
@mock.patch('oc_group.Utils.create_tmpfile_copy')
@mock.patch('oc_group.OCGroup._run')
def test_failed_get_group(self, mock_run, mock_tmpfile_copy):
        ''' Testing a failed group get '''
params = copy.deepcopy(OCGroupTest.params)
params['state'] = 'list'
params['name'] = 'noexist'
mock_run.side_effect = [
(1, '', 'Error from server: groups.user.openshift.io "acme" not found'),
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCGroup.run_ansible(params, False)
self.assertTrue(results['failed'])
@mock.patch('oc_group.Utils.create_tmpfile_copy')
@mock.patch('oc_group.OCGroup._run')
def test_delete_group(self, mock_run, mock_tmpfile_copy):
        ''' Testing a group delete '''
params = copy.deepcopy(OCGroupTest.params)
params['state'] = 'absent'
group = '''{
"kind": "Group",
"apiVersion": "v1",
"metadata": {
"name": "acme"
},
"users": [
"user1"
]
}'''
mock_run.side_effect = [
(0, group, ''),
(0, '', ''),
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCGroup.run_ansible(params, False)
self.assertTrue(results['changed'])
@mock.patch('oc_group.Utils.create_tmpfile_copy')
@mock.patch('oc_group.OCGroup._run')
def test_get_group(self, mock_run, mock_tmpfile_copy):
        ''' Testing a group get '''
params = copy.deepcopy(OCGroupTest.params)
params['state'] = 'list'
group = '''{
"kind": "Group",
"apiVersion": "v1",
"metadata": {
"name": "acme"
},
"users": [
"user1"
]
}'''
mock_run.side_effect = [
(0, group, ''),
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCGroup.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['results'][0]['metadata']['name'], 'acme')
self.assertEqual(results['results'][0]['users'][0], 'user1')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
|
apache-2.0
|
waseem18/oh-mainline
|
vendor/packages/sphinx/sphinx/pycode/pgen2/tokenize.py
|
32
|
16435
|
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from sphinx.pycode.pgen2.token import *
from sphinx.pycode.pgen2 import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
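# For reference: group('a', 'b') -> '(a|b)', any('a', 'b') -> '(a|b)*',
# maybe('a', 'b') -> '(a|b)?'.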
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, scell, ecell, line): # for testing
srow, scol = scell
erow, ecol = ecell
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
# if we are not at the end of the file make sure the
# line ends with a newline because the parser depends
# on that.
if line:
line = line.rstrip() + '\n'
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
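def _example_dump_tokens(source):
    """Minimal usage sketch (illustrative, not part of the original module):
    drives generate_tokens() with a readline-style callable built from an
    in-memory string and prints each 5-tuple via printtoken() above."""
    from cStringIO import StringIO  # Python 2, consistent with this module
    for toktype, tokstring, start, end, line in generate_tokens(StringIO(source).readline):
        printtoken(toktype, tokstring, start, end, line)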
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
|
agpl-3.0
|
nrwahl2/ansible
|
lib/ansible/modules/cloud/amazon/elb_application_lb_facts.py
|
26
|
10370
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_application_lb_facts
short_description: Gather facts about application ELBs in AWS
description:
- Gather facts about application ELBs in AWS
version_added: "2.4"
author: Rob White (@wimnat)
options:
load_balancer_arns:
description:
- The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.
required: false
names:
description:
- The names of the load balancers.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all application ELBs
- elb_application_lb_facts:
# Gather facts about a particular ELB given its ARN
- elb_application_lb_facts:
load_balancer_arns:
- "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about ELBs named 'elb1' and 'elb2'
- elb_application_lb_facts:
names:
- elb1
- elb2
'''
RETURN = '''
load_balancers:
description: a list of load balancers
returned: always
type: complex
contains:
access_logs_s3_bucket:
description: The name of the S3 bucket for the access logs.
returned: when status is present
type: string
sample: mys3bucket
access_logs_s3_enabled:
description: Indicates whether access logs stored in Amazon S3 are enabled.
returned: when status is present
type: string
sample: true
access_logs_s3_prefix:
description: The prefix for the location in the S3 bucket.
returned: when status is present
type: string
sample: /my/logs
availability_zones:
description: The Availability Zones for the load balancer.
returned: when status is present
type: list
sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
canonical_hosted_zone_id:
description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
returned: when status is present
type: string
sample: ABCDEF12345678
created_time:
description: The date and time the load balancer was created.
returned: when status is present
type: string
sample: "2015-02-12T02:14:02+00:00"
deletion_protection_enabled:
description: Indicates whether deletion protection is enabled.
returned: when status is present
type: string
sample: true
dns_name:
description: The public DNS name of the load balancer.
returned: when status is present
type: string
sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
idle_timeout_timeout_seconds:
description: The idle timeout value, in seconds.
returned: when status is present
type: string
sample: 60
ip_address_type:
description: The type of IP addresses used by the subnets for the load balancer.
returned: when status is present
type: string
sample: ipv4
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when status is present
type: string
sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
load_balancer_name:
description: The name of the load balancer.
returned: when status is present
type: string
sample: my-elb
scheme:
description: Internet-facing or internal load balancer.
returned: when status is present
type: string
sample: internal
security_groups:
description: The IDs of the security groups for the load balancer.
returned: when status is present
type: list
sample: ['sg-0011223344']
state:
description: The state of the load balancer.
returned: when status is present
type: dict
sample: "{'code': 'active'}"
tags:
description: The tags attached to the load balancer.
returned: when status is present
type: dict
sample: "{
'Tag': 'Example'
}"
type:
description: The type of load balancer.
returned: when status is present
type: string
sample: application
vpc_id:
description: The ID of the VPC for the load balancer.
returned: when status is present
type: string
sample: vpc-0011223344
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_elb_listeners(connection, module, elb_arn):
try:
return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_listener_rules(connection, module, listener_arn):
try:
return connection.describe_rules(ListenerArn=listener_arn)['Rules']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_load_balancer_attributes(connection, module, load_balancer_arn):
try:
load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
for k, v in list(load_balancer_attributes.items()):
load_balancer_attributes[k.replace('.', '_')] = v
del load_balancer_attributes[k]
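    # e.g. the attribute key 'access_logs.s3.enabled' becomes 'access_logs_s3_enabled'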
return load_balancer_attributes
def get_load_balancer_tags(connection, module, load_balancer_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_load_balancers(connection, module):
load_balancer_arns = module.params.get("load_balancer_arns")
names = module.params.get("names")
try:
load_balancer_paginator = connection.get_paginator('describe_load_balancers')
if not load_balancer_arns and not names:
load_balancers = load_balancer_paginator.paginate().build_full_result()
if load_balancer_arns:
load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
if names:
load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'LoadBalancerNotFound':
module.exit_json(load_balancers=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
for load_balancer in load_balancers['LoadBalancers']:
# Get the attributes for each elb
load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
# Get the listeners for each elb
load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
# For each listener, get listener rules
for listener in load_balancer['listeners']:
listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
# Get tags for each load balancer
for snaked_load_balancer in snaked_load_balancers:
snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
module.exit_json(load_balancers=snaked_load_balancers)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=['load_balancer_arns', 'names'],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_load_balancers(connection, module)
if __name__ == '__main__':
main()
|
gpl-3.0
|
glo/ee384b
|
opencv/tests/python/test_adaptors.py
|
5
|
3898
|
#!/usr/bin/env python
"""A simple TestCase class for testing the adaptors.py module.
2007-11-xx, Vicent Mas <[email protected]> Carabos Coop. V.
2007-11-08, minor modifications for distribution, Mark Asbach <[email protected]>
"""
import unittest
import os
import PIL.Image
import numpy
import cvtestutils
import cv
import highgui
import adaptors
import sys
class AdaptorsTestCase(unittest.TestCase):
def test00_array_interface(self):
"""Check if PIL supports the array interface."""
self.assert_(PIL.Image.VERSION>='1.1.6',
"""The installed PIL library doesn't support the array """
"""interface. Please, update to version 1.1.6b2 or higher.""")
def test01_PIL2NumPy(self):
"""Test the adaptors.PIL2NumPy function."""
a = adaptors.PIL2NumPy(self.pil_image)
self.assert_(a.flags['WRITEABLE'] == True,
'PIL2NumPy should return a writeable array.')
b = numpy.asarray(self.pil_image)
self.assert_((a == b).all() == True,
'The returned numpy array has not been properly constructed.')
def test02_NumPy2PIL(self):
"""Test the adaptors.NumPy2PIL function."""
a = numpy.asarray(self.pil_image)
b = adaptors.NumPy2PIL(a)
self.assert_(self.pil_image.tostring() == b.tostring(),
'The returned image has not been properly constructed.')
def test03_Ipl2PIL(self):
"""Test the adaptors.Ipl2PIL function."""
i = adaptors.Ipl2PIL(self.ipl_image)
self.assert_(self.pil_image.tostring() == i.tostring(),
'The returned image has not been properly constructed.')
def test04_PIL2Ipl(self):
"""Test the adaptors.PIL2Ipl function."""
i = adaptors.PIL2Ipl(self.pil_image)
self.assert_(self.ipl_image.imageData == i.imageData,
'The returned image has not been properly constructed.')
def test05_Ipl2NumPy(self):
"""Test the adaptors.Ipl2NumPy function."""
a = adaptors.Ipl2NumPy(self.ipl_image)
a_1d = numpy.reshape(a, (a.size, ))
# For 3-channel IPL images the order of channels will be BGR
# but NumPy array order of channels will be RGB so a conversion
# is needed before we can compare both images
if self.ipl_image.nChannels == 3:
rgb = cv.cvCreateImage(cv.cvSize(self.ipl_image.width, self.ipl_image.height), self.ipl_image.depth, 3)
cv.cvCvtColor(self.ipl_image, rgb, cv.CV_BGR2RGB)
self.assert_(a_1d.tostring() == rgb.imageData,
'The returned image has not been properly constructed.')
else:
self.assert_(a_1d.tostring() == self.ipl_image.imageData,
'The returned image has not been properly constructed.')
def test06_NumPy2Ipl(self):
"""Test the adaptors.NumPy2Ipl function."""
a = adaptors.Ipl2NumPy(self.ipl_image)
b = adaptors.NumPy2Ipl(a)
self.assert_(self.ipl_image.imageData == b.imageData,
'The returned image has not been properly constructed.')
def load_image( self, fname ):
self.ipl_image = highgui.cvLoadImage(fname, 4|2)
self.pil_image = PIL.Image.open(fname, 'r')
class AdaptorsTestCase1(AdaptorsTestCase):
def setUp( self ):
self.load_image( os.path.join(cvtestutils.datadir(),'images','cvSetMouseCallback.jpg'))
class AdaptorsTestCase2(AdaptorsTestCase):
def setUp( self ):
self.load_image( os.path.join(cvtestutils.datadir(),'images','baboon.jpg'))
def suite():
cases=[]
cases.append( unittest.TestLoader().loadTestsFromTestCase( AdaptorsTestCase1 ) )
cases.append( unittest.TestLoader().loadTestsFromTestCase( AdaptorsTestCase2 ) )
return unittest.TestSuite(cases)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
lgpl-2.1
|
philanthropy-u/edx-platform
|
openedx/core/djangoapps/oauth_dispatch/models.py
|
4
|
4648
|
"""
Specialized models for oauth_dispatch djangoapp
"""
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_mysql.models import ListCharField
from oauth2_provider.settings import oauth2_settings
from organizations.models import Organization
from pytz import utc
from openedx.core.djangoapps.oauth_dispatch.toggles import ENFORCE_JWT_SCOPES
from openedx.core.lib.request_utils import get_request_or_stub
class RestrictedApplication(models.Model):
"""
This model lists which django-oauth-toolkit Applications are considered 'restricted'
and thus have a limited ability to use various APIs.
A restricted Application will only get expired token/JWT payloads
so that they cannot be used to call into APIs.
"""
application = models.ForeignKey(oauth2_settings.APPLICATION_MODEL, null=False, on_delete=models.CASCADE)
class Meta:
app_label = 'oauth_dispatch'
def __unicode__(self):
"""
Return a unicode representation of this object
"""
return u"<RestrictedApplication '{name}'>".format(
name=self.application.name
)
@classmethod
def should_expire_access_token(cls, application):
set_token_expired = not ENFORCE_JWT_SCOPES.is_enabled()
jwt_not_requested = get_request_or_stub().POST.get('token_type', '').lower() != 'jwt'
restricted_application = cls.objects.filter(application=application).exists()
return restricted_application and (jwt_not_requested or set_token_expired)
@classmethod
def verify_access_token_as_expired(cls, access_token):
"""
For access_tokens for RestrictedApplications, make sure that the expiry date
is set at the beginning of the epoch which is Jan. 1, 1970
"""
return access_token.expires == datetime(1970, 1, 1, tzinfo=utc)
class ApplicationAccess(models.Model):
"""
Specifies access control information for the associated Application.
"""
application = models.OneToOneField(oauth2_settings.APPLICATION_MODEL, related_name='access')
scopes = ListCharField(
base_field=models.CharField(max_length=32),
size=25,
max_length=(25 * 33), # 25 * 32 character scopes, plus commas
help_text=_('Comma-separated list of scopes that this application will be allowed to request.'),
)
class Meta:
app_label = 'oauth_dispatch'
@classmethod
def get_scopes(cls, application):
return cls.objects.get(application=application).scopes
def __unicode__(self):
"""
Return a unicode representation of this object.
"""
return u"{application_name}:{scopes}".format(
application_name=self.application.name,
scopes=self.scopes,
)
class ApplicationOrganization(models.Model):
"""
Associates a DOT Application to an Organization.
See openedx/core/djangoapps/oauth_dispatch/docs/decisions/0007-include-organizations-in-tokens.rst
for the intended use of this model.
"""
RELATION_TYPE_CONTENT_ORG = 'content_org'
RELATION_TYPES = (
(RELATION_TYPE_CONTENT_ORG, _('Content Provider')),
)
application = models.ForeignKey(oauth2_settings.APPLICATION_MODEL, related_name='organizations')
organization = models.ForeignKey(Organization)
relation_type = models.CharField(
max_length=32,
choices=RELATION_TYPES,
default=RELATION_TYPE_CONTENT_ORG,
)
class Meta:
app_label = 'oauth_dispatch'
unique_together = ('application', 'relation_type', 'organization')
@classmethod
def get_related_org_names(cls, application, relation_type=None):
"""
Return the names of the Organizations related to the given DOT Application.
Filter by relation_type if provided.
"""
queryset = application.organizations.all()
if relation_type:
queryset = queryset.filter(relation_type=relation_type)
return [r.organization.name for r in queryset]
def __unicode__(self):
"""
Return a unicode representation of this object.
"""
return u"{application_name}:{organization}:{relation_type}".format(
application_name=self.application.name,
organization=self.organization.short_name,
relation_type=self.relation_type,
)
def to_jwt_filter_claim(self):
"""
Serialize for use in JWT filter claim.
"""
return unicode(':'.join([self.relation_type, self.organization.short_name]))
|
agpl-3.0
|
drawcode/yaml-cpp.new-api
|
test/gmock-1.7.0/gtest/test/gtest_uninitialized_test.py
|
2901
|
2480
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
|
mit
|
cunningchloe/gwiki
|
extensions/ConfirmEdit/captcha.py
|
47
|
7848
|
#!/usr/bin/python
#
# Script to generate distorted text images for a captcha system.
#
# Copyright (C) 2005 Neil Harris
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# http://www.gnu.org/copyleft/gpl.html
#
# Further tweaks by Brion Vibber <[email protected]>:
# 2006-01-26: Add command-line options for the various parameters
# 2007-02-19: Add --dirs param for hash subdirectory splits
# Tweaks by Greg Sabino Mullane <[email protected]>:
# 2008-01-06: Add regex check to skip words containing other than a-z
import random
import math
import hashlib
from optparse import OptionParser
import os
import sys
import re
try:
import Image
import ImageFont
import ImageDraw
import ImageEnhance
import ImageOps
except:
sys.exit("This script requires the Python Imaging Library - http://www.pythonware.com/products/pil/")
nonalpha = re.compile('[^a-z]') # regex to test for suitability of words
# Does X-axis wobbly copy, sandwiched between two rotates
def wobbly_copy(src, wob, col, scale, ang):
x, y = src.size
f = random.uniform(4*scale, 5*scale)
p = random.uniform(0, math.pi*2)
rr = ang+random.uniform(-30, 30) # vary, but not too much
int_d = Image.new('RGB', src.size, 0) # a black rectangle
rot = src.rotate(rr, Image.BILINEAR)
# Do a cheap bounding-box op here to try to limit work below
bbx = rot.getbbox()
if bbx is None:
return src
else:
l, t, r, b= bbx
# and only do lines with content on
for i in range(t, b+1):
# Drop a scan line in
xoff = int(math.sin(p+(i*f/y))*wob)
xoff += int(random.uniform(-wob*0.5, wob*0.5))
int_d.paste(rot.crop((0, i, x, i+1)), (xoff, i))
# try to stop blurring from building up
int_d = int_d.rotate(-rr, Image.BILINEAR)
enh = ImageEnhance.Sharpness(int_d)
return enh.enhance(2)
def gen_captcha(text, fontname, fontsize, file_name):
"""Generate a captcha image"""
# white text on a black background
bgcolor = 0x0
fgcolor = 0xffffff
# create a font object
font = ImageFont.truetype(fontname,fontsize)
# determine dimensions of the text
dim = font.getsize(text)
# create a new image significantly larger than the text
edge = max(dim[0], dim[1]) + 2*min(dim[0], dim[1])
im = Image.new('RGB', (edge, edge), bgcolor)
d = ImageDraw.Draw(im)
x, y = im.size
# add the text to the image
d.text((x/2-dim[0]/2, y/2-dim[1]/2), text, font=font, fill=fgcolor)
k = 3
wob = 0.20*dim[1]/k
rot = 45
# Apply lots of small stirring operations, rather than a few large ones
# in order to get some uniformity of treatment, whilst
# maintaining randomness
for i in range(k):
im = wobbly_copy(im, wob, bgcolor, i*2+3, rot+0)
im = wobbly_copy(im, wob, bgcolor, i*2+1, rot+45)
im = wobbly_copy(im, wob, bgcolor, i*2+2, rot+90)
rot += 30
# now get the bounding box of the nonzero parts of the image
bbox = im.getbbox()
bord = min(dim[0], dim[1])/4 # a bit of a border
im = im.crop((bbox[0]-bord, bbox[1]-bord, bbox[2]+bord, bbox[3]+bord))
# and turn into black on white
im = ImageOps.invert(im)
# save the image, in format determined from filename
im.save(file_name)
def gen_subdir(basedir, md5hash, levels):
"""Generate a subdirectory path out of the first _levels_
characters of _md5hash_, and ensure the directories exist
under _basedir_."""
subdir = None
for i in range(0, levels):
char = md5hash[i]
if subdir:
subdir = os.path.join(subdir, char)
else:
subdir = char
fulldir = os.path.join(basedir, subdir)
if not os.path.exists(fulldir):
os.mkdir(fulldir)
return subdir
def try_pick_word(words, blacklist, verbose):
word1 = words[random.randint(0,len(words)-1)]
word2 = words[random.randint(0,len(words)-1)]
word = word1+word2
if verbose:
print "word is %s" % word
if nonalpha.search(word):
if verbose:
print "skipping word pair '%s' because it contains non-alphabetic characters" % word
return None
for naughty in blacklist:
if naughty in word:
if verbose:
print "skipping word pair '%s' because it contains blacklisted word '%s'" % (word, naughty)
return None
return word
def pick_word(words, blacklist, verbose):
for x in range(1000): # If we can't find a valid combination in 1000 tries, just give up
word = try_pick_word(words, blacklist, verbose)
if word:
return word
sys.exit("Unable to find valid word combinations")
def read_wordlist(filename):
return [x.strip().lower() for x in open(filename).readlines()]
if __name__ == '__main__':
"""This grabs random words from the dictionary 'words' (one
word per line) and generates a captcha image for each one,
with a keyed salted hash of the correct answer in the filename.
To check a reply, hash it in the same way with the same salt and
secret key, then compare with the hash value given.
"""
parser = OptionParser()
parser.add_option("--wordlist", help="A list of words (required)", metavar="WORDS.txt")
parser.add_option("--key", help="The passphrase set as $wgCaptchaSecret (required)", metavar="KEY")
parser.add_option("--output", help="The directory to put the images in - $wgCaptchaDirectory (required)", metavar="DIR")
parser.add_option("--font", help="The font to use (required)", metavar="FONT.ttf")
parser.add_option("--font-size", help="The font size (default 40)", metavar="N", type='int', default=40)
parser.add_option("--count", help="The maximum number of images to make (default 20)", metavar="N", type='int', default=20)
parser.add_option("--blacklist", help="A blacklist of words that should not be used", metavar="FILE")
parser.add_option("--fill", help="Fill the output directory to contain N files, overrides count, cannot be used with --dirs", metavar="N", type='int')
parser.add_option("--dirs", help="Put the images into subdirectories N levels deep - $wgCaptchaDirectoryLevels", metavar="N", type='int')
parser.add_option("--verbose", "-v", help="Show debugging information", action='store_true')
opts, args = parser.parse_args()
if opts.wordlist:
wordlist = opts.wordlist
else:
sys.exit("Need to specify a wordlist")
if opts.key:
key = opts.key
else:
sys.exit("Need to specify a key")
if opts.output:
output = opts.output
else:
sys.exit("Need to specify an output directory")
if opts.font and os.path.exists(opts.font):
font = opts.font
else:
sys.exit("Need to specify the location of a font")
blacklistfile = opts.blacklist
count = opts.count
fill = opts.fill
dirs = opts.dirs
verbose = opts.verbose
fontsize = opts.font_size
if fill:
count = max(0, fill - len(os.listdir(output)))
words = read_wordlist(wordlist)
words = [x for x in words
if len(x) in (4,5) and x[0] != "f"
and x[0] != x[1] and x[-1] != x[-2]]
if blacklistfile:
blacklist = read_wordlist(blacklistfile)
else:
blacklist = []
for i in range(count):
word = pick_word(words, blacklist, verbose)
salt = "%08x" % random.randrange(2**32)
# 64 bits of hash is plenty for this purpose
md5hash = hashlib.md5(key+salt+word+key+salt).hexdigest()[:16]
filename = "image_%s_%s.png" % (salt, md5hash)
if dirs:
subdir = gen_subdir(output, md5hash, dirs)
filename = os.path.join(subdir, filename)
if verbose:
print filename
gen_captcha(word, font, fontsize, os.path.join(output, filename))
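# Illustrative addition (not part of the original script): the block above
# embeds a keyed, salted hash of the answer in each filename. A minimal
# sketch of the corresponding check described in the usage notes, assuming
# answers are compared case-insensitively and using the same
# key+salt+word+key+salt construction and 16-hex-digit truncation as above:
def check_captcha_answer(key, salt, answer, expected_hash):
    """Return True if `answer` hashes to `expected_hash` for the given key and salt."""
    digest = hashlib.md5(key + salt + answer.strip().lower() + key + salt).hexdigest()[:16]
    return digest == expected_hash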
|
gpl-2.0
|
Johnzero/OE7
|
openerp/addons/web/tests/test_js.py
|
69
|
1238
|
import urlparse
from openerp import sql_db, tools
from qunitsuite.suite import QUnitSuite
class WebSuite(QUnitSuite):
def __init__(self):
url = urlparse.urlunsplit([
'http',
'localhost:{port}'.format(port=tools.config['xmlrpc_port']),
'/web/tests',
'mod=*&source={db}&supadmin={supadmin}&password={password}'.format(
db=tools.config['db_name'],
# al: I don't understand why both are needed; db_password is the
# password for postgres and should not appear here, of that I'm
# sure.
#
# But runbot provides it under this wrong key, so I leave it here
# until it's fixed
supadmin=tools.config['db_password'] or 'admin',
password=tools.config['admin_passwd'] or 'admin'),
''
])
super(WebSuite, self).__init__(url, 50000)
def run(self, result):
if sql_db._Pool is not None:
sql_db._Pool.close_all(sql_db.dsn(tools.config['db_name']))
return super(WebSuite, self).run(result)
def load_tests(loader, standard_tests, _):
standard_tests.addTest(WebSuite())
return standard_tests
|
agpl-3.0
|
aksareen/balrog
|
scripts/dump-json.py
|
3
|
1144
|
#!/usr/bin/env python
from os import path
import sys
import simplejson as json
# Our parent directory should contain the auslib module, so we add it to the
# PYTHONPATH to make things easier on consumers.
sys.path.append(path.join(path.dirname(__file__), ".."))
from auslib.db import AUSDatabase
if __name__ == "__main__":
from optparse import OptionParser
doc = "%s --db dburi -r release-name" % sys.argv[0]
parser = OptionParser(doc)
parser.add_option("-d", "--db", dest="db", default=None, help="database to manage, in URI format")
parser.add_option("-r", "--release", dest="release", default=None, help="Release to retrieve blob for")
parser.add_option("-u", "--ugly", dest="ugly", default=False, action="store_true", help="Don't format output")
options, args = parser.parse_args()
if not options.db or not options.release:
print "db and release are required"
print doc
sys.exit(1)
db = AUSDatabase(options.db)
blob = db.releases.getReleaseBlob(options.release)
if options.ugly:
print json.dumps(blob)
else:
print json.dumps(blob, indent=4)
|
mpl-2.0
|
BadgerMaps/django-allauth
|
allauth/socialaccount/providers/angellist/provider.py
|
75
|
1034
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class AngelListAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('angellist_url')
def get_avatar_url(self):
return self.account.extra_data.get('image')
def to_str(self):
dflt = super(AngelListAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class AngelListProvider(OAuth2Provider):
id = 'angellist'
name = 'AngelList'
package = 'allauth.socialaccount.providers.angellist'
account_class = AngelListAccount
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return dict(email=data.get('email'),
username=data.get('angellist_url').split('/')[-1],
name=data.get('name'))
providers.registry.register(AngelListProvider)
|
mit
|
ZenDevelopmentSystems/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
348
|
6232
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore so
are the corresponding Mahalanobis distances. It is better to use a
robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is shown in the boxplot, as suggested by Wilson and Hilferty [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
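# Illustrative addition (not part of the original example): a minimal sketch
# of the squared Mahalanobis distance from the docstring,
# d^2 = (x - mu)' Sigma^-1 (x - mu), written out with plain numpy. The helper
# name is hypothetical; EmpiricalCovariance.mahalanobis() used below performs
# the equivalent computation for a fitted estimator.
def squared_mahalanobis(x, mu, sigma):
    """Squared Mahalanobis distance of a single observation x."""
    diff = x - mu
    return float(diff.dot(np.linalg.solve(sigma, diff)))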
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
bsd-3-clause
|
hlin117/scikit-learn
|
sklearn/neighbors/lof.py
|
33
|
12186
|
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from warnings import warn
from scipy.stats import scoreatpercentile
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import UnsupervisedMixin
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:ref:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If 'precomputed', the training input X is expected to be a distance
matrix.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
contamination : float in (0., 0.5], optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the decision function.
n_jobs : int, optional (default=1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Attributes
----------
negative_outlier_factor_ : numpy array, shape (n_samples,)
The opposite LOF of the training samples. The lower, the more normal.
Inliers tend to have a LOF score close to 1, while outliers tend
to have a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : integer
The actual number of neighbors used for :meth:`kneighbors` queries.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination=0.1, n_jobs=1):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
def fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels
(1 inlier, -1 outlier) on the training set according to the LOF score
and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
Returns
-------
self : object
Returns self.
"""
if not (0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5]")
super(LocalOutlierFactor, self).fit(X)
n_samples = self._fit_X.shape[0]
if self.n_neighbors > n_samples:
warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = (
self.kneighbors(None, n_neighbors=self.n_neighbors_))
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define threshold_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
self.threshold_ = -scoreatpercentile(
-self.negative_outlier_factor_, 100. * (1. - self.contamination))
return self
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
This method allows prediction to be generalized to new observations (not
in the training set). As LOF originally does not deal with new data,
this method is kept private.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"n_neighbors_", "_distances_fit_X_"])
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self._decision_function(X) <= self.threshold_] = -1
else:
is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
is_inlier[self.negative_outlier_factor_ <= self.threshold_] = -1
return is_inlier
def _decision_function(self, X):
"""Opposite of the Local Outlier Factor of X (as bigger is better,
i.e. large values correspond to inliers).
The argument X is supposed to contain *new data*: if X contains a
point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The decision function on training data is available by considering the
opposite of the negative_outlier_factor_ attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input sample.
The lower, the more abnormal.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"_distances_fit_X_"])
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : array, shape (n_query, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : array, shape (n_samples,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
# 1e-10 to avoid `nan` when the number of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
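# Illustrative addition (not part of the original module): a minimal usage
# sketch of the estimator defined above, assuming it is exercised as the
# docstring describes (fit_predict returns +1 for inliers and -1 for
# outliers, with the threshold driven by `contamination`). Intended to be
# run in the package context, e.g. via `python -m`.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    # 100 inlier points plus 10 spread-out outliers
    X_demo = np.r_[rng.randn(100, 2), rng.uniform(low=-6, high=6, size=(10, 2))]
    lof = LocalOutlierFactor(n_neighbors=20, contamination=0.1)
    labels = lof.fit_predict(X_demo)
    print("flagged %d of %d samples as outliers" % ((labels == -1).sum(), len(labels)))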
|
bsd-3-clause
|
RannyeriDev/Solfege
|
solfege/mpd/mpdutils.py
|
4
|
3482
|
# GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2007, 2008, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Only utility functions and classes that are private to the mpd module
should go into this file.
"""
from __future__ import absolute_import
import re
from solfege.mpd.musicalpitch import MusicalPitch
def int_to_octave_notename(i):
return MusicalPitch.new_from_int(i).get_octave_notename()
def int_to_user_octave_notename(i):
return MusicalPitch.new_from_int(i).get_user_octave_notename()
def notename_to_int(n):
return MusicalPitch.new_from_notename(n).semitone_pitch()
def key_to_accidentals(key):
i = ['aeses', 'eeses', 'beses', 'fes', 'ces', 'ges', 'des', 'aes',
'ees', 'bes', 'f', 'c', 'g', 'd', 'a', 'e', 'b', 'fis', 'cis',
'gis', 'dis', 'ais', 'eis', 'bis'].index(key[0])-11
if key[1] == 'minor':
i = i - 3
if i > 0:
r = ['fis', 'cis', 'gis', 'dis', 'ais', 'eis',
'bis', 'fis', 'cis', 'gis', 'dis'][:i]
m = 'is'
elif i < 0:
r = ['bes', 'ees', 'aes', 'des', 'ges', 'ces',
'fes', 'bes', 'ees', 'aes', 'des'][:-i]
m = 'es'
else:
r = []
retval = []
for a in r:
if a not in retval:
retval.append(a)
else:
del retval[retval.index(a)]
retval.append(a+m)
return retval
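# Illustrative note (added): for example, key_to_accidentals(('d', 'major'))
# walks two steps sharp-ward and returns ['fis', 'cis'], while a flat key
# such as ('f', 'major') returns ['bes'].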
def find_possible_first_note(music):
"""
Return a tuple of 2 integers locating what we believe is the first
pitch (not including the duration).
If we are unable to parse the music, return the location of the text
we do not understand.
"""
i = 0
# FIXME regexes are modified copies from the mpd Lexer. Try to reuse
# code in the future.
re_white = re.compile(r"\s+")
re_clef = re.compile(r"\\clef\s+(\w*)", re.UNICODE)
re_clef_quoted = re.compile(r"\\clef\s+\"([A-Za-z1-9]+[_^1-9]*)\"", re.UNICODE)
re_time = re.compile(r"\\time\s+(\d+)\s*/\s*(\d+)", re.UNICODE)
re_times = re.compile(r"\\times\s+(\d+)\s*/\s*(\d+)\s*{", re.UNICODE)
re_key = re.compile(r"\\key\s+([a-z]+)\s*\\(major|minor)", re.UNICODE)
re_note = re.compile("(?P<beamstart>(\[\s*)?)(?P<chordstart>(\<\s*)?)(?P<pitchname>[a-zA-Z]+[',]*)(\d+\.*)?")
i = 0
re_list = re_white, re_clef_quoted, re_clef, re_key, re_times, re_time, re_note
while 1:
for r in re_list:
m = r.match(music[i:])
if m:
if r != re_note:
i += m.end()
break
elif r == re_note:
assert m
i += len(m.group("beamstart"))
i += len(m.group("chordstart"))
return i, i + len(m.group('pitchname'))
elif r == re_list[-1]:
return i, i+1
|
gpl-3.0
|
gdevanla/hyde
|
hydeengine/site_post_processors.py
|
40
|
9772
|
from __future__ import with_statement
import os, re, string, subprocess, codecs
from django.conf import settings
from django.template.loader import render_to_string
from file_system import File
from datetime import datetime
from hydeengine.templatetags.hydetags import xmldatetime
import commands
class YUICompressor:
@staticmethod
def process(folder, params):
class Compressor:
def visit_file(self, thefile):
if settings.YUI_COMPRESSOR == None:
return
compress = settings.YUI_COMPRESSOR
if not os.path.exists(compress):
compress = os.path.join(
os.path.dirname(
os.path.abspath(__file__)), "..", compress)
if not compress or not os.path.exists(compress):
raise ValueError(
"YUI Compressor cannot be found at [%s]" % compress)
tmp_file = File(thefile.path + ".z-tmp")
status, output = commands.getstatusoutput(
u"java -jar %s %s > %s" % (compress, thefile.path, tmp_file.path))
if status > 0:
print output
else:
thefile.delete()
tmp_file.move_to(thefile.path)
folder.walk(Compressor(), "*.css")
class FolderFlattener:
@staticmethod
def process(folder, params):
class Flattener:
def __init__(self, folder, params):
self.folder = folder
self.remove_processed_folders = \
params["remove_processed_folders"]
self.previous_folder = None
def visit_file(self, file):
if not self.folder.is_parent_of(file):
file.copy_to(self.folder)
def visit_folder(self, this_folder):
if self.previous_folder and self.remove_processed_folders:
self.previous_folder.delete()
if not self.folder.same_as(this_folder):
self.previous_folder = this_folder
def visit_complete(self):
if self.previous_folder and self.remove_processed_folders:
self.previous_folder.delete()
folder.walk(Flattener(folder, params), params["pattern"])
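# Illustrative note (added): both processors above are driven by Hyde's site
# configuration rather than called directly. A hypothetical params dict for
# FolderFlattener, matching the keys read above, would look like
# {"pattern": "*.html", "remove_processed_folders": True}, with `folder`
# being the target Folder to flatten into.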
SITEMAP_CONFIG = \
"""<?xml version="1.0" encoding="UTF-8"?>
<site
base_url="%(base_url)s"
store_into="%(sitemap_path)s"
suppress_search_engine_notify="1"
verbose="1"
>
<urllist path="%(url_list_file)s"/>
</site>"""
class GoogleSitemapGenerator:
@staticmethod
def process(folder, params):
site = settings.CONTEXT['site']
sitemap_path = params["sitemap_file"]
url_list_file = File(sitemap_path).parent.child("urllist.txt")
config_file = File(sitemap_path).parent.child("sitemap_config.xml")
urllist = open(url_list_file, 'w')
for page in site.walk_pages():
if not page.display_in_list and not page.listing:
continue
created = xmldatetime(page.created)
updated = xmldatetime(page.updated)
url = page.full_url
priority = 0.5
if page.listing:
priority = 1.0
changefreq = "weekly"
urllist.write(
"%(url)s lastmod=%(updated)s changefreq=%(changefreq)s \
priority=%(priority).1f\n"
% locals())
urllist.close()
base_url = settings.SITE_WWW_URL
config = open(config_file, 'w')
config.write(SITEMAP_CONFIG % locals())
config.close()
generator = params["generator"]
command = u"python %s --config=%s" % (generator, config_file)
status, output = commands.getstatusoutput(command)
if status > 0:
print output
File(config_file).delete()
File(url_list_file).delete()
class RssGenerator:
"""
Can create an RSS feed for a blog and its categories when
specified in params
"""
@staticmethod
def process(folder, params):
#defaults initialisation
site = settings.CONTEXT['site']
node = params['node']
by_categories = False
categories = None
output_folder = 'feed'
generator = Rss2FeedGenerator()
if params.has_key('output_folder'):
output_folder = params['output_folder']
if params.has_key('generate_by_categories'):
by_categories = params['generate_by_categories']
if hasattr(node, 'categories'):
categories = node.categories
if categories != None:
#feed generation for each category
for category in categories:
#create a ContentNode adapter for categories to walk through the collection (walk_pages function)
#the same way as through the site's ContentNode
category_adapter = ContentNodeAdapter(category)
feed = generator.generate(category_adapter)
feed_filename = "%s.xml" % (category["name"].lower().replace(' ','_'))
feed_url = "%s/%s/%s/%s" % (settings.SITE_WWW_URL, site.url, output_folder, feed_filename)
category["feed_url"] = feed_url
RssGenerator._write_feed(feed, output_folder, feed_filename)
feed = generator.generate(node)
node.feed_url = "%s/%s/%s/%s" % (settings.SITE_WWW_URL, site.url, output_folder, "feed.xml")
RssGenerator._write_feed(feed, output_folder, "feed.xml")
@staticmethod
def _write_feed(feed, folder, file_name):
output = os.path.join(settings.CONTENT_DIR, folder)
if not os.path.isdir(output):
os.makedirs(output)
filename = os.path.join(output, file_name)
with codecs.open(filename, 'w', 'utf-8') as f:
f.write(feed)
class ContentNodeAdapter:
"""
Adapter for a collection of posts to fulfill the ContentNode
walk_pages contract
"""
def __init__(self, category):
self.category = category
def walk_pages(self):
for post in self.category["posts"]:
yield post
class FeedGenerator:
"""
Base abstract class for the generation of syndication feeds
"""
def __init__(self):
pass
def generate(self, node):
"""
Template method calling child implementations
"""
#generate items
items = self.generate_items(node)
#generate feed with items inside
feed = self.generate_feed(items)
return feed
def generate_items(self, node):
raise TypeError('abstract function')
def generate_feed(self, items):
raise TypeError('abstract function')
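# Illustrative note (added): FeedGenerator.generate() is a template method;
# concrete formats only implement generate_items() and generate_feed(), as
# Rss2FeedGenerator does below for RSS 2.0.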
RSS2_FEED = \
"""<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
<channel>
<title>%(title)s</title>
<link>%(url)s/</link>
<description>%(description)s</description>
<language>%(language)s</language>
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
<generator>Hyde</generator>
<webMaster>%(webmaster)s</webMaster>
%(items)s
</channel>
</rss>"""
RSS2_ITEMS = \
"""
<item>
<title>%(item_title)s</title>
<link>%(item_link)s</link>
<guid>%(guid)s</guid>
<description><![CDATA[%(description)s]]></description>
<pubDate>%(publication_date)s</pubDate>
<author>%(author)s</author>
</item>"""
class Rss2FeedGenerator(FeedGenerator):
"""
Implementation of an RSS version 2 generator
"""
def __init__(self):
FeedGenerator.__init__(self)
self.re_content = re.compile(r"<!-- Hyde::Article::Begin -->(.*)<!-- Hyde::Article::End -->", re.DOTALL)
self.date_format = hasattr(settings, "POST_DATE_FORMAT") and \
settings.POST_DATE_FORMAT or "%d %b %y %H:%M GMT"
def generate_items(self, node):
items = ""
author = settings.SITE_AUTHOR_EMAIL or ''
for post in node.walk_pages():
if hasattr(post, 'listing') and post.listing:
continue
item_title = post.title
item_link = post.full_url
guid = post.full_url
description = self.re_content.findall(post.temp_file.read_all())
description = len(description) > 0 and description[0] or ""
description = description.decode("utf-8")
publication_date = post.created.strftime(self.date_format)
cur_item = RSS2_ITEMS % locals()
items = "%s%s" % (items, cur_item)
return items
def generate_feed(self, items):
title = settings.SITE_NAME
url = settings.SITE_WWW_URL
description = ''
language = settings.LANGUAGE_CODE or 'en-us'
webmaster = settings.SITE_AUTHOR_EMAIL
return RSS2_FEED % locals()
class WhiteSpaceVisitor(object):
def __init__(self, **kw):
self.operations = []
del kw['node']
for k,v in kw.items():
if v == True and hasattr(self, k):
self.operations.append(getattr(self, k))
elif not hasattr(self, k):
raise Exception("Unknown WhiteSpaceRemover operation: %s" % k)
def visit_file(self, a_file):
for oper in self.operations:
oper(a_file)
def remove_document_leading_whitespace(self, a_file):
lines = a_file.read_all().splitlines()
while lines[0] == '':
lines.pop(0)
a_file.write(unicode("\n".join(lines), 'utf8'))
class WhiteSpaceRemover:
@staticmethod
def process(folder, params):
visitor = WhiteSpaceVisitor(**params)
extensions = ['html', 'xml']
if params.has_key('extensions'):
extensions = params['extensions']
for ext in extensions:
folder.walk(visitor, '*.%s' % ext)
|
mit
|
tex0l/JukeBox
|
display_LCDd_2x40.py
|
1
|
5980
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from lcdproc.server import Server
from threading import Thread, Event, Timer, Lock
import time
import logging
from unidecode import unidecode
import music_player
class LockableServer(Server):
"""
A subclass of lcdproc Server to make it thread-safe
"""
def __init__(self, hostname, port):
super(LockableServer, self).__init__(hostname=hostname, port=port)
self._lock = Lock()
def acquire(self):
self._lock.acquire()
def release(self):
self._lock.release()
def __enter__(self):
self.acquire()
# noinspection PyShadowingBuiltins,PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
def __exit__(self, type, value, traceback):
self.release()
class UpdateThread(Thread):
"""
A thread to update the display regularly
"""
def __init__(self, display, loaded_config):
Thread.__init__(self, name='UpdateThread')
self.alive = Event()
self.alive.set()
self.display = display
self.player = music_player.Player(loaded_config)
self.playing = ["",""]
self.loaded_config = loaded_config
def run(self):
logging.debug("Starting updating thread ")
while self.alive.isSet():
time.sleep(0.25)
self.display.set_queue(self.player.queue_count())
self.display.waiting_entry()
if self.player.is_playing():
if self.player.index() != self.playing:
self.display.playing_song(self.player.index(), self.player.title(), self.player.artist())
else:
self.display.waiting()
self.playing = ""
def join(self, timeout=None):
self.alive.clear()
return super(UpdateThread, self).join(timeout)
class DisplayLCDd2x40:
"""
A class to handle all the display functions of the jukebox and actually display them on a 40x2
display through the python lcdproc module
"""
def __init__(self, loaded_config):
self.loaded_config = loaded_config
self.lcd = LockableServer(hostname=self.loaded_config.lcd['lcdd_host'],
port=self.loaded_config.lcd['lcdd_port'])
with self.lcd:
self.lcd.start_session()
self.screen = self.lcd.add_screen(unidecode("jukebox"))
self.screen.set_heartbeat(unidecode("off"))
self.screen.set_priority(unidecode("foreground"))
self.entry_string = self.screen.add_scroller_widget(unidecode("entry"),
text=unidecode("Choose song"), left=1, top=1,
right=28, bottom=1, speed=4)
self.queue_string = self.screen.add_string_widget(unidecode("queue"),
text=unidecode("Queue : 0"), x=30, y=1)
self.icon = self.screen.add_icon_widget(unidecode("playIcon"), x=1, y=2, name=unidecode("STOP"))
self.playing_string = self.screen.add_scroller_widget(unidecode("playing"),
text=unidecode("Nothing in the playlist."
" Add a song ?"),
left=3, top=2, right=40, bottom=2, speed=4)
self.UT = UpdateThread(self, loaded_config)
self.UT.start()
self.timer = None
self.entryInProgress = False
self.lastAdded = time.time()
self.queue = 0
def set_queue(self, q):
"""
Change the length of the queue displayed on the LCD
"""
self.queue = q
with self.lcd:
self.queue_string.set_text(unidecode("Queue : %d" % q))
def waiting(self):
"""
Tell the display that no song is playing
"""
with self.lcd:
self.icon.set_name(unidecode("STOP"))
self.playing_string.set_text(unidecode("Nothing in the playlist. Add a song ?"))
def playing_song(self, index, title, artist):
#TODO
"""
Tell the display which song is playing
"""
with self.lcd:
self.icon.set_name(unidecode("PLAY"))
index = unicode(index[0])+unicode(index[1])
text = "%s - %s - %s" % (index, title, artist)
self.playing_string.set_text(unidecode(text))
def remove_entry(self):
"""
Tell the display that there is no entry
"""
self.entryInProgress = False
self.waiting_entry()
def waiting_entry(self):
"""
The display waits for an entry
"""
if self.entryInProgress is False:
if (self.queue < self.loaded_config.variables['nb_music']) \
or (time.time() - self.lastAdded > self.loaded_config.variables['add_timeout']):
with self.lcd:
self.entry_string.set_text(unidecode("Choose song"))
else:
text = "Wait %s seconds" % (
int(self.loaded_config.variables['add_timeout'] + 1 - time.time() + self.lastAdded))
with self.lcd:
self.entry_string.set_text(unidecode(text))
def entry(self, entry, song=None):
"""
The display shows the current entry
"""
self.entryInProgress = True
text = "Entry : %s" % entry
if self.timer is not None:
self.timer.cancel()
if song is not None:
text += " - %s - %s" % (song.name, song.artist)
self.lastAdded = time.time()
self.timer = Timer(5, self.remove_entry)
self.timer.start()
with self.lcd:
self.entry_string.set_text(unidecode(text))
|
apache-2.0
|
HalCanary/skia-hc
|
infra/bots/recipes/android_compile.py
|
3
|
5523
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
DEPS = [
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'run',
'vars',
]
CF_X86_PHONE_ENG_LUNCH_TARGET = 'cf_x86_phone-eng'
SDK_LUNCH_TARGET = 'sdk'
LUNCH_TARGET_TO_MMMA_TARGETS = {
CF_X86_PHONE_ENG_LUNCH_TARGET: (
'frameworks/base/core/jni,frameworks/base/libs/hwui,external/skia'),
SDK_LUNCH_TARGET: 'external/skia',
}
def RunSteps(api):
api.vars.setup()
if not api.vars.is_trybot:
# This bot currently only supports trybot runs because:
# Non-trybot runs could fail if the Android tree is red. We mitigate this
# for trybot runs by verifying that runs without the patch succeed. We do
# not currently have a way to do the same for non-trybot runs.
raise Exception('%s can only be run as a trybot.' % api.vars.builder_name)
if CF_X86_PHONE_ENG_LUNCH_TARGET in api.vars.builder_name:
lunch_target = CF_X86_PHONE_ENG_LUNCH_TARGET
mmma_targets = LUNCH_TARGET_TO_MMMA_TARGETS[lunch_target]
elif SDK_LUNCH_TARGET in api.vars.builder_name:
lunch_target = SDK_LUNCH_TARGET
mmma_targets = LUNCH_TARGET_TO_MMMA_TARGETS[SDK_LUNCH_TARGET]
else:
raise Exception('Lunch target in %s is not recognized.' %
api.vars.builder_name)
infrabots_dir = api.path['start_dir'].join('skia', 'infra', 'bots')
trigger_wait_ac_script = infrabots_dir.join('android_compile',
'trigger_wait_ac_task.py')
# Trigger a compile task on the android compile server and wait for it to
# complete.
cmd = ['python', trigger_wait_ac_script,
'--lunch_target', lunch_target,
'--mmma_targets', mmma_targets,
'--issue', api.vars.issue,
'--patchset', api.vars.patchset,
'--builder_name', api.vars.builder_name,
]
try:
with api.context(cwd=api.path['start_dir'].join('skia')):
api.run(api.step, 'Trigger and wait for task on android compile server', cmd=cmd)
except api.step.StepFailure as e:
# Add withpatch and nopatch logs as links (if they exist).
gs_file = 'gs://android-compile-tasks/%s-%s-%s.json' % (
lunch_target, api.vars.issue, api.vars.patchset)
step_result = api.step('Get task log links',
['gsutil', 'cat', gs_file],
stdout=api.json.output())
task_json = step_result.stdout
if task_json.get('withpatch_log'):
api.step.active_result.presentation.links[
'withpatch compilation log link'] = task_json['withpatch_log']
if task_json.get('nopatch_log'):
api.step.active_result.presentation.links[
'nopatch compilation log link'] = task_json['nopatch_log']
# Add link to force sync of the Android checkout.
api.step.active_result.presentation.links['force sync link'] = (
'https://skia-android-compile.corp.goog/')
raise e
def GenTests(api):
yield(
api.test('android_compile_trybot') +
api.properties.tryserver(
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
) +
api.properties(
buildername='Build-Debian9-Clang-cf_x86_phone-eng-Android_Framework',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
repository='https://skia.googlesource.com/skia.git',
)
)
yield(
api.test('android_compile_sdk_trybot') +
api.properties.tryserver(
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
) +
api.properties(
buildername='Build-Debian9-Clang-host-sdk-Android_Framework',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
repository='https://skia.googlesource.com/skia.git',
)
)
yield(
api.test('android_compile_unrecognized_target') +
api.properties.tryserver(
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
) +
api.properties(
buildername='Build-Debian9-Clang-unrecognized-Android_Framework',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
repository='https://skia.googlesource.com/skia.git',
) +
api.expect_exception('Exception')
)
yield(
api.test('android_compile_trybot_failure') +
api.properties.tryserver(
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
) +
api.properties(
buildername='Build-Debian9-Clang-cf_x86_phone-eng-Android_Framework',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
repository='https://skia.googlesource.com/skia.git',
) +
api.step_data('Trigger and wait for task on android compile server',
retcode=1) +
api.step_data('Get task log links',
stdout=api.raw_io.output(
'{"withpatch_log":"link1", "nopatch_log":"link2"}'))
)
yield(
api.test('android_compile_nontrybot') +
api.properties(
buildername='Build-Debian9-Clang-cf_x86_phone-eng-Android_Framework',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
) +
api.expect_exception('Exception')
)
|
bsd-3-clause
|
moandcompany/luigi
|
test/helpers.py
|
6
|
5988
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import itertools
import luigi
import luigi.task_register
import luigi.cmdline_parser
from luigi.cmdline_parser import CmdlineParser
from luigi import six
import os
import unittest
def skipOnTravis(reason):
return unittest.skipIf(os.getenv('TRAVIS') == 'true', reason)
class with_config(object):
"""
Decorator to override config settings for the length of a function.
Usage:
.. code-block:: python
>>> import luigi.configuration
>>> @with_config({'foo': {'bar': 'baz'}})
... def my_test():
... print(luigi.configuration.get_config().get("foo", "bar"))
...
>>> my_test()
baz
>>> @with_config({'hoo': {'bar': 'buz'}})
... @with_config({'foo': {'bar': 'baz'}})
... def my_test():
... print(luigi.configuration.get_config().get("foo", "bar"))
... print(luigi.configuration.get_config().get("hoo", "bar"))
...
>>> my_test()
baz
buz
>>> @with_config({'foo': {'bar': 'buz'}})
... @with_config({'foo': {'bar': 'baz'}})
... def my_test():
... print(luigi.configuration.get_config().get("foo", "bar"))
...
>>> my_test()
baz
>>> @with_config({'foo': {'bur': 'buz'}})
... @with_config({'foo': {'bar': 'baz'}})
... def my_test():
... print(luigi.configuration.get_config().get("foo", "bar"))
... print(luigi.configuration.get_config().get("foo", "bur"))
...
>>> my_test()
baz
buz
>>> @with_config({'foo': {'bur': 'buz'}})
... @with_config({'foo': {'bar': 'baz'}}, replace_sections=True)
... def my_test():
... print(luigi.configuration.get_config().get("foo", "bar"))
... print(luigi.configuration.get_config().get("foo", "bur", "no_bur"))
...
>>> my_test()
baz
no_bur
"""
def __init__(self, config, replace_sections=False):
self.config = config
self.replace_sections = replace_sections
def _make_dict(self, old_dict):
if self.replace_sections:
old_dict.update(self.config)
return old_dict
def get_section(sec):
old_sec = old_dict.get(sec, {})
new_sec = self.config.get(sec, {})
old_sec.update(new_sec)
return old_sec
all_sections = itertools.chain(old_dict.keys(), self.config.keys())
return {sec: get_section(sec) for sec in all_sections}
def __call__(self, fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
import luigi.configuration
orig_conf = luigi.configuration.LuigiConfigParser.instance()
new_conf = luigi.configuration.LuigiConfigParser()
luigi.configuration.LuigiConfigParser._instance = new_conf
orig_dict = {k: dict(orig_conf.items(k)) for k in orig_conf.sections()}
new_dict = self._make_dict(orig_dict)
for (section, settings) in six.iteritems(new_dict):
new_conf.add_section(section)
for (name, value) in six.iteritems(settings):
new_conf.set(section, name, value)
try:
return fun(*args, **kwargs)
finally:
luigi.configuration.LuigiConfigParser._instance = orig_conf
return wrapper
class RunOnceTask(luigi.Task):
def __init__(self, *args, **kwargs):
super(RunOnceTask, self).__init__(*args, **kwargs)
self.comp = False
def complete(self):
return self.comp
def run(self):
self.comp = True
class LuigiTestCase(unittest.TestCase):
"""
Tasks registered within a test case will get unregistered in a finalizer
"""
def setUp(self):
super(LuigiTestCase, self).setUp()
self._stashed_reg = luigi.task_register.Register._get_reg()
def tearDown(self):
luigi.task_register.Register._set_reg(self._stashed_reg)
super(LuigiTestCase, self).tearDown()
def run_locally(self, args):
""" Helper for running tests testing more of the stack, the command
line parsing and task from name intstantiation parts in particular. """
temp = CmdlineParser._instance
try:
CmdlineParser._instance = None
run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
finally:
CmdlineParser._instance = temp
return run_exit_status
def run_locally_split(self, space_separated_args):
""" Helper for running tests that exercise more of the stack, the command
line parsing and task-from-name instantiation parts in particular. """
return self.run_locally(space_separated_args.split(' '))
class parsing(object):
"""
Convenient decorator for test cases to set the parsing environment.
"""
def __init__(self, cmds):
self.cmds = cmds
def __call__(self, fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
with CmdlineParser.global_instance(self.cmds, allow_override=True):
return fun(*args, **kwargs)
return wrapper
def in_parse(cmds, deferred_computation):
with CmdlineParser.global_instance(cmds):
deferred_computation()
|
apache-2.0
|
caktus/ibid
|
ibid/lib/dcwords.py
|
2
|
17546
|
#!/usr/bin/env python
# Copyright (c) 2009, Stefano Rivera
# Released under terms of the MIT/X/Expat Licence. See COPYING for details.
# Speaks NMDC protocol. Not widely tested.
# Assumes the hub uses UTF-8. Client interface uses unicode()
# Currently only implements chat, not file transfer
# a chatroom of None == public chat
import re
from twisted.protocols.basic import LineReceiver
from twisted.internet import protocol, reactor
import logging
log = logging.getLogger('dcclient')
class User(object):
"Represents a client connected to the hub"
def __init__(self, name):
self.name = name
for key in 'interest,client,upload_limit,download_limit,hubs,mode,auto_open,slots,client,mode,connection,away,email,sharesize,bot,op'.split(','):
setattr(self, key, None)
class DCClient(LineReceiver):
# Configuration:
# Attempt to keep the connection alive with periodic $GetNickLists
# if idle (rare on a busy server)
keepalive = True
ping_interval = 180
pong_timeout = 180
# Client information (mostly simply provided to server)
my_nickname = 'foo'
my_password = None
my_interest = ''
my_speed = '1kbps'
my_email = ''
my_away = 'normal'
my_sharesize = 0
my_mode = 'active'
my_hubs = (0, 0, 1)
my_slots = 0
old_version = '1.2'
client = 'TwisteDC'
version = 'dev'
auto_open = None
# Server Properties
hub_name = ''
hub_topic = ''
hub_motd = ''
hub_tagline = ''
hub_supports = ()
hub_users = {}
# LineReceiver:
delimiter = '|'
# State:
finished_handshake = False
_ping_deferred = None
_reconnect_deferred = None
# Callbacks:
def yourHost(self, name, topic, tagline, motd):
"Called with information about the server"
def bounce(self, destination):
"""Called with information about where the client should reconnect
or None, if the server is trying to get rid of us"""
def isupport(self, options):
"Called with extenisons the server supports"
def privmsg(self, user, private, message):
"Called when I have a message from a user to me or the chat"
def action(self, user, private, message):
"Called when I see an action in private or chat"
def signedOn(self):
"Called when successfully signed on"
def userJoined(self, user):
"Called when a user joins"
def userQuit(self, user):
"Called when a user leaves"
def topicUpdated(self, topic):
"Called when the topic is changed"
# Actions:
def say(self, user, message):
"Send a message to a user or chat if user=None"
if user is None:
self.sendLine('<%s> %s' % (
_encode_htmlent(self.my_nickname, '>'), _encode_htmlent(message)
))
else:
self.sendLine('$To: %s From: %s $<%s> %s' % (
_encode_htmlent(user, ' '),
_encode_htmlent(self.my_nickname, ' '),
_encode_htmlent(self.my_nickname, '>'),
_encode_htmlent(message),
))
def away(self, away='away'):
"Update the away status. For possible statuses, see _away"
self.my_away = away
self._sendMyINFO()
def back(self):
"Return to normal away status"
self.my_away = 'normal'
self._sendMyINFO()
def topic(self, topic):
"Set a new topic"
self.say(None, u'!topic ' + topic)
# Code:
# High Level Protocol:
def dc_HubIsFull(self, params):
log.debug("Hub is full")
def dc_Lock(self, params):
"Calculate the NMDC Lock code"
challange = params.split(' ', 1)[0]
key = {}
for i in xrange(1, len(challange)):
key[i] = ord(challange[i]) ^ ord(challange[i-1])
key[0] = ord(challange[0]) ^ ord(challange[len(challange)-1]) ^ ord(challange[len(challange)-2]) ^ 5
for i in xrange(0, len(challange)):
key[i] = ((key[i]<<4) & 240) | ((key[i]>>4) & 15)
response = ""
for i in xrange(0, len(key)):
if key[i] in (0, 5, 36, 96, 124, 126):
response += "/%%DCN%03d%%/" % (key[i],)
else:
response += chr(key[i])
if challange.startswith('EXTENDEDPROTOCOL'):
self.sendLine('$Supports HubTopic QuickList NoHello')
self.sendLine('$Key ' + response)
if not challange.startswith('EXTENDEDPROTOCOL'):
self.sendLine('$ValidateNick ' + _encode_htmlent(self.my_nickname))
# Otherwise defer registration to dc_Supports
def dc_HubName(self, params):
"Connected / Hub Name Changed"
self.hub_name = _decode_htmlent(params)
if 'HubTopic' not in self.hub_supports:
self.topicUpdated(self.hub_name)
def dc_HubTopic(self, params):
"Hub Topic changed"
self.hub_topic = _decode_htmlent(params)
self.topicUpdated(self.hub_topic)
def dc_Supports(self, params):
"Hub Extensions"
self.hub_supports = params.split(' ')
self.isupport(self.hub_supports)
if 'QuickList' not in self.hub_supports:
self.sendLine('$ValidateNick ' + _encode_htmlent(self.my_nickname))
elif self.my_password:
self._sendMyINFO()
if self.my_password is None:
if 'QuickList' not in self.hub_supports:
self.sendLine('$Version ' + _encode_htmlent(self.old_version))
self.sendLine('$GetNickList')
self._sendMyINFO()
def dc_ValidateDenide(self, params):
"Server didn't like the nick, try another"
self.my_nickname += '_'
log.error('Nickname rejected, trying %s', self.my_nickname)
self.sendLine('$ValidateNick ' + _encode_htmlent(self.my_nickname))
def dc_Hello(self, params):
"Someone arrived"
nick = _decode_htmlent(params)
if nick == self.my_nickname:
return
if nick not in self.hub_users:
self.hub_users[nick] = User(nick)
self.userJoined(nick)
def dc_GetPass(self, params):
"Password requested"
self.sendLine('$MyPass ' + _encode_htmlent(self.my_password))
def dc_BadPass(self, params):
"Password rejected"
log.error('Password rejected')
def dc_LogedIn(self, params):
"Password accepted"
if 'QuickList' not in self.hub_supports:
self.sendLine('$Version ' + _encode_htmlent(self.old_version))
self.sendLine('$GetNickList')
self._sendMyINFO()
def dc_MyINFO(self, params):
"Information about a user"
self._state_Connected()
m = re.match(r'^\$ALL (\S*) (.*?)(?:<(\S*) ([A-Z0-9.:,/]*)>)?'
r'\$(.)\$([^$]*)([^$])\$([^$]*)\$(\d*)\$$', params)
if not m:
log.error("Couldn't decode MyINFO: %s", params)
return
nick = _decode_htmlent(m.group(1))
if nick == self.my_nickname:
return
if nick in self.hub_users:
user = self.hub_users[nick]
else:
user = User(nick)
user.my_interest = _decode_htmlent(m.group(2))
user.client = (m.group(3) and _decode_htmlent(m.group(3)) or None, None)
if m.group(4):
for taglet in _decode_htmlent(m.group(4)).split(','):
try:
key, value = taglet.split(':', 1)
if key in ('B', 'L'):
user.upload_limit = float(value)
elif key == 'F':
user.download_limit, user.upload_limit = value.split('/', 1)
elif key == 'H':
user.hubs = value.split('/')
elif key == 'M':
user.mode = _rmodes[value]
elif key == 'O':
user.auto_open = float(value)
elif key == 'S':
user.slots = int(value)
elif key == 'V':
user.client = (m.group(3), value)
else:
log.error('Unknown tag key: %s:%s on user %s', key, value, nick)
except:
log.exception('Error parsing tag: %s', m.group(4))
if m.group(5) in _rmodes:
user.mode = _rmodes[m.group(5)]
user.connection = _decode_htmlent(m.group(6))
user.away = m.group(7) in _raway and _raway[m.group(7)] or 'normal'
user.email = _decode_htmlent(m.group(8))
user.sharesize = m.group(9) and int(m.group(9)) or 0
if nick not in self.hub_users:
self.hub_users[nick] = user
self.userJoined(nick)
def dc_OpList(self, params):
"List of Ops received"
for nick in params.split('$$'):
nick = _decode_htmlent(nick)
if nick == self.my_nickname:
continue
user = nick in self.hub_users and self.hub_users[nick] or User(nick)
user.op = True
if nick not in self.hub_users:
self.hub_users[nick] = user
self.userJoined(nick)
def dc_BotList(self, params):
"List of Bots received"
for nick in params.split('$$'):
nick = _decode_htmlent(nick)
if nick == self.my_nickname:
continue
user = nick in self.hub_users and self.hub_users[nick] or User(nick)
user.bot = True
if nick not in self.hub_users:
self.hub_users[nick] = user
self.userJoined(nick)
def dc_NickList(self, params):
"List of connected users received"
self._state_Connected()
if self._reconnect_deferred is not None:
log.log(logging.DEBUG - 5, u'Received PONG')
self._reconnect_deferred.cancel()
self._reconnect_deferred = None
self._ping_deferred = reactor.callLater(self.ping_interval, self._idle_ping)
oldlist = set(self.hub_users.keys())
for nick in params.split('$$'):
nick = _decode_htmlent(nick)
if nick == self.my_nickname:
continue
user = nick in self.hub_users and self.hub_users[nick] or User(nick)
if nick in self.hub_users:
oldlist.remove(nick)
else:
self.hub_users[nick] = user
self.userJoined(nick)
for nick in oldlist:
self.userQuit(nick)
del self.hub_users[nick]
def dc_ConnectToMe(self, params):
"Someone wants to connect to me"
#TODO
def dc_RevConnectToMe(self, params):
"Someone wants me to connect to them"
#TODO
def dc_Quit(self, params):
"Someone has gone home"
nick = _decode_htmlent(params)
if nick in self.hub_users:
self.userQuit(nick)
del self.hub_users[nick]
def dc_Search(self, params):
"Someone wants to find something"
#TODO
def dc_ForceMove(self, params):
"Redirecting elsewhere"
self.bounce(params and _decode_htmlent(params) or None)
def dc_UserCommand(self, params):
"Menu of Hub specific commands"
#TODO
def dc_UserIP(self, params):
"I asked for an IP, here it is"
#TODO
def dc_To(self, params):
"Received a private message"
to_re = re.compile(r'^.*? From: ([^$]*?) \$<[^>]*?> (.*)$', re.DOTALL)
m = to_re.match(params)
if m is None:
log.error('Cannot parse message: %s', params)
return
self.privmsg(_decode_htmlent(m.group(1)), True, _decode_htmlent(m.group(2)))
# Helpers:
def _state_Connected(self):
"Update the state that we are now connected and won't be reciveing MOTD any more"
if not self.finished_handshake:
self.finished_handshake = True
self.yourHost(self.hub_name, self.hub_topic, self.hub_tagline, self.hub_motd)
self.signedOn()
def _sendMyINFO(self):
"Tell the server all about me"
tags = []
if self.version:
tags.append('V:' + self.version)
if self.my_mode in _modes.keys():
tags.append('M:' + _modes[self.my_mode])
if self.my_hubs:
tags.append('H:' + '/'.join(str(x) for x in self.my_hubs))
if self.my_slots:
tags.append('S:%i' % self.my_slots)
if self.auto_open:
tags.append('O:' + self.auto_open)
tag = '%s %s' % (self.client, ','.join(tags))
away = _away[self.my_away]
self.sendLine('$MyINFO $ALL %s %s<%s>$ $%s%s$%s$%s$' % (
_encode_htmlent(self.my_nickname, ' '),
_encode_htmlent(self.my_interest),
_encode_htmlent(tag),
_encode_htmlent(self.my_speed),
away,
_encode_htmlent(self.my_email),
self.my_sharesize,
))
def _idle_ping(self):
"Fired when idle and keepalive is enabled"
log.log(logging.DEBUG - 5, u'Sending idle PING')
self._ping_deferred = None
self._reconnect_deferred = reactor.callLater(self.pong_timeout, self._timeout_reconnect)
self.sendLine('$GetNickList')
def _timeout_reconnect(self):
"Fired when pong never recived"
log.info(u'Ping-Pong timeout. Reconnecting')
self.transport.loseConnection()
# Low Level Protocol:
def connectionMade(self):
if self.keepalive:
self._ping_deferred = reactor.callLater(self.ping_interval, self._idle_ping)
def sendLine(self, line):
if self._ping_deferred:
self._ping_deferred.reset(self.ping_interval)
return LineReceiver.sendLine(self, line)
def lineReceived(self, line):
if self._ping_deferred:
self._ping_deferred.reset(self.ping_interval)
if line.strip() == '':
return
elif line[0] == '$':
command = line[1:].split(' ', 1)[0]
params = ' ' in line and line[1:].split(' ', 1)[1] or None
handler = getattr(self, 'dc_' + command.strip(':'), None)
if handler:
handler(params)
else:
log.error('Unhandled command received: %s', command)
return
elif line[0] == '<':
speaker, message = line[1:].split('>', 1)
speaker = _decode_htmlent(speaker)
message = _decode_htmlent(message[1:])
if not self.finished_handshake:
if not self.hub_tagline:
self.hub_tagline = message
else:
self.hub_motd += message + '\n'
else:
if speaker != self.my_nickname:
self.privmsg(speaker, False, message)
elif line.startswith('* ') or line.startswith('** '):
action = line.split(' ', 1)[1].split(' ', 1)
speaker = _decode_htmlent(action[0])
message = len(action) > 1 and _decode_htmlent(action[1]) or u''
if speaker != self.my_nickname:
self.action(speaker, False, message)
else:
log.error('Unrecognised command received: %s', line)
return
def _encode_htmlent(message, extra_enc=''):
"DC uses HTML entities to encode non-ASCII text. Encode."
if isinstance(message, unicode):
message = message.encode('utf-8')
replace = lambda match: '&#%i;' % ord(match.group(1))
return re.sub(r'([$|%s])' % extra_enc, replace, message)
def _decode_htmlent(message):
"DC uses HTML entities to encode non-ASCII text. Decode."
replace = lambda match: unichr(int(match.group(1)))
message = unicode(message, 'utf-8', 'replace')
message = re.sub(r'&#(\d+);', replace, message)
return re.sub(r'/%DCN(\d{3})%/', replace, message)
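# A minimal, illustrative sketch (not part of the original module): it shows the
# round trip between _encode_htmlent and _decode_htmlent for a message containing
# the protocol-sensitive characters '$' and '|'. The sample text is hypothetical.
def _htmlent_example():
    "Encode a message, then decode it back; the two forms should match."
    original = u'price is $5 | more'
    encoded = _encode_htmlent(original)   # '$' and '|' become &#36; and &#124;
    decoded = _decode_htmlent(encoded)
    assert decoded == original
    return encoded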
_modes = {
'active': 'A',
'passive': 'P',
'socks': '5',
}
_rmodes = dict((y, x) for x, y in _modes.iteritems())
_away = {
'normal': chr(1),
'away': chr(2),
'server': chr(4),
'server away': chr(6),
'fireball': chr(8),
'fireball away': chr(10),
}
_raway = dict((y, x) for x, y in _away.iteritems())
_raway.update({
chr(3): 'away',
chr(5): 'server',
chr(7): 'server away',
chr(9): 'fireball',
chr(11): 'fireball away',
})
# Small testing framework:
def main():
logging.basicConfig(level=logging.NOTSET)
class TestClient(DCClient):
def privmsg(self, user, private, message):
if 'test' in message:
self.say(private and user or None, '%s said %s' % (user, message))
self.say(None, '+me waves a test message')
def sendLine(self, line):
log.debug('> %s', line)
DCClient.sendLine(self, line)
def lineReceived(self, line):
log.debug('< %s', line)
DCClient.lineReceived(self, line)
class DCFactory(protocol.ClientFactory):
protocol = TestClient
def clientConnectionLost(self, connector, reason):
log.info('Lost')
reactor.stop()
def clientConnectionFailed(self, connector, reason):
log.info('Failed')
reactor.stop()
f = DCFactory()
reactor.connectTCP('localhost', 411, f)
reactor.run()
if __name__ == '__main__':
main()
# vi: set et sta sw=4 ts=4:
|
gpl-3.0
|
skg-net/ansible
|
lib/ansible/modules/network/nxos/nxos_snmp_traps.py
|
61
|
7281
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_traps
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP traps.
description:
- Manages SNMP traps configurations.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- This module works at the group level for traps. If you need to only
enable/disable 1 specific trap within a group, use the M(nxos_command)
module.
- Be aware that you can set a trap only for an enabled feature.
options:
group:
description:
- Case sensitive group.
required: true
choices: ['aaa', 'bfd', 'bgp', 'bridge', 'callhome', 'cfs', 'config',
'eigrp', 'entity', 'feature-control', 'generic', 'hsrp', 'license',
'link', 'lldp', 'mmode', 'ospf', 'pim', 'rf', 'rmon', 'snmp',
'storm-control', 'stpx', 'switchfabric', 'syslog', 'sysmgr', 'system',
'upgrade', 'vtp', 'all']
state:
description:
- Manage the state of the resource.
required: false
default: enabled
choices: ['enabled','disabled']
'''
EXAMPLES = '''
# ensure lldp trap configured
- nxos_snmp_traps:
group: lldp
state: enabled
# ensure lldp trap is not configured
- nxos_snmp_traps:
group: lldp
state: disabled
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: "snmp-server enable traps lldp ;"
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
command = {
'command': command,
'output': 'text',
}
return run_commands(module, command)
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_traps(group, module):
body = execute_show_command('show run snmp all', module)[0].split('\n')
resource = {}
feature_list = ['aaa', 'bfd', 'bgp', 'bridge', 'callhome', 'cfs', 'config',
'eigrp', 'entity', 'feature-control', 'generic', 'hsrp',
'license', 'link', 'lldp', 'mmode', 'ospf', 'pim',
'rf', 'rmon', 'snmp', 'storm-control', 'stpx',
'switchfabric', 'syslog', 'sysmgr', 'system', 'upgrade',
'vtp']
for each in feature_list:
for line in body:
if each == 'ospf':
# ospf behaves differently when routers are present
if 'snmp-server enable traps ospf' == line:
resource[each] = True
break
else:
if 'enable traps {0}'.format(each) in line:
if 'no ' in line:
resource[each] = False
break
else:
resource[each] = True
for each in feature_list:
if resource.get(each) is None:
# on some platforms, the 'no' cmd does not
# show up and so check if the feature is enabled
body = execute_show_command('show run | inc feature', module)[0]
if 'feature {0}'.format(each) in body:
resource[each] = False
find = resource.get(group, None)
if group == 'all'.lower():
return resource
elif find is not None:
trap_resource = {group: find}
return trap_resource
else:
# if 'find' is None, it means that 'group' is a
# currently disabled feature.
return {}
def get_trap_commands(group, state, existing, module):
commands = []
enabled = False
disabled = False
if group == 'all':
if state == 'disabled':
for feature in existing:
if existing[feature]:
trap_command = 'no snmp-server enable traps {0}'.format(feature)
commands.append(trap_command)
elif state == 'enabled':
for feature in existing:
if existing[feature] is False:
trap_command = 'snmp-server enable traps {0}'.format(feature)
commands.append(trap_command)
else:
if group in existing:
if existing[group]:
enabled = True
else:
disabled = True
if state == 'disabled' and enabled:
commands.append(['no snmp-server enable traps {0}'.format(group)])
elif state == 'enabled' and disabled:
commands.append(['snmp-server enable traps {0}'.format(group)])
else:
module.fail_json(msg='{0} is not a currently '
'enabled feature.'.format(group))
return commands
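# Illustrative sketch (not part of the original module): how get_trap_commands
# and flatten_list combine for a hypothetical existing state. Passing None for
# module is only safe here because this path never reaches fail_json.
def _example_trap_commands():
    existing = {'lldp': True}            # the lldp trap group is currently enabled
    commands = get_trap_commands('lldp', 'disabled', existing, None)
    return flatten_list(commands)        # ['no snmp-server enable traps lldp']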
def main():
argument_spec = dict(
state=dict(choices=['enabled', 'disabled'], default='enabled'),
group=dict(choices=['aaa', 'bfd', 'bgp', 'bridge', 'callhome', 'cfs', 'config',
'eigrp', 'entity', 'feature-control', 'generic', 'hsrp',
'license', 'link', 'lldp', 'mmode', 'ospf', 'pim',
'rf', 'rmon', 'snmp', 'storm-control', 'stpx',
'switchfabric', 'syslog', 'sysmgr', 'system', 'upgrade',
'vtp', 'all'],
required=True),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
group = module.params['group'].lower()
state = module.params['state']
existing = get_snmp_traps(group, module)
commands = get_trap_commands(group, state, existing, module)
cmds = flatten_list(commands)
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
aegm/pcvs
|
fabfile.py
|
1
|
5579
|
# -*- coding: utf-8 -*-
"""Fabfile de procedimientos de deployment.
En el presente fabfile se desarrolla una serie de procedimientos que automatizan
las actividades de deployment para el presente proyecto.
"""
import os
from fabric.api import *
from fabric.colors import *
# Define where and under which identity the commands will run. Normally nothing
# needs to change here, since this should not vary on the production server and
# this file is not meant to be run locally.
env.hosts = ['localhost']
env.user = 'jenkins'
env.password = 'j3nk1s**111'
# The base path, the brand directory, and the directory to be copied are also
# defined (in order of appearance); at the end everything is joined to build the
# full working path. Trailing slashes are not needed (note that DEPLOY_PATH
# already includes them).
DEPLOY_BASE_PATH = '/var/www/nginx/2014'
BRAND_FOLDER = 'pilsencallao'
DEPLOY_FOLDER = 'sanvalentin-staging'
# DEPLOY_FOLDER = os.getcwd().split('/')[-1] # Could be, for example, 'sitio-abc'
# If the script should log into the database server and create a new database
# (or wipe it if it already exists), fill in the access credentials and the DB
# name; if a dump should also be loaded, set its file name (it must live in the
# repository, next to this file). Otherwise leave the corresponding values
# blank.
DATABASE_USERNAME = 'root'
DATABASE_PASSWORD = '*sql$$cr1xus*'
DATABASE_NAME = 'sanvalentin_db'
DUMP_FILE = 'dump.sql'
# Normalization of the database password; this should not be touched.
DATABASE_PASSWORD = DATABASE_PASSWORD.replace('$', '\\$')
# This joins the paths written in the previous section; normally it should not
# need to be edited.
BASE_BRAND_PATH = '%s/%s' % (DEPLOY_BASE_PATH, BRAND_FOLDER,)
DEPLOY_PATH = '%s/%s/' % (BASE_BRAND_PATH, DEPLOY_FOLDER,)
@task
def deploy_staging():
"""Staging deployment process."""
print cyan('Deploy staging begins.')
    # Teardown: where everything is normally cleaned up. This covers dropping
    # the database (if credentials were provided) and removing the project's
    # previous directory.
print red('Performing teardown procedures...')
sudo('rm -rf %s' % DEPLOY_PATH)
run('mkdir -p %s' % DEPLOY_PATH)
if DATABASE_USERNAME and DATABASE_PASSWORD and DATABASE_NAME:
run(
'echo \'DROP DATABASE IF EXISTS %s;\' | mysql -u\'%s\' -p\'%s\''
% (DATABASE_NAME, DATABASE_USERNAME, DATABASE_PASSWORD,),
shell_escape=False
)
    # If any additional operation is needed after the teardown (and/or before
    # the setup), edit the post_teardown function, to keep this section as
    # clean as possible.
print green('Performing post-teardown procedures...')
post_teardown()
    # Setup: the operations normally performed once the environment has been
    # cleaned. In this case the database is regenerated if its backup file was
    # supplied, and finally the whole project is copied to its final location.
print green('Performing setup procedures...')
if DATABASE_USERNAME and DATABASE_PASSWORD and DATABASE_NAME:
run(
'echo \'CREATE DATABASE %s;\' | mysql -u\'%s\' -p\'%s\''
% (DATABASE_NAME, DATABASE_USERNAME, DATABASE_PASSWORD,),
shell_escape=False
)
if DUMP_FILE:
run(
'mysql -u\'%s\' -p\'%s\' %s < %s/jenkins/%s'
% (DATABASE_USERNAME, DATABASE_PASSWORD, DATABASE_NAME,
os.getcwd(), DUMP_FILE),
shell_escape=False
)
run('cp -R %s/* %s' % (os.getcwd(), DEPLOY_PATH))
    # If a series of additional operations is required at this point, they
    # must be defined in the post_setup function.
print green('Performing post-setup procedures...')
post_setup()
    print green('All done (with success, hopefully)!')
def post_teardown():
"""
Actividades que se realizan después del Teardown y antes del Setup que son
específicas para este proyecto únicamente. De otra forma, esta función
debería de quedar vacía (sólo la sentencia 'pass').
"""
# run('mkdir -p %s/system/cusquena/cache/default' % DEPLOY_PATH)
# run('chmod 777 -R %s/system/cusquena/cache' % DEPLOY_PATH)
pass
def post_setup():
"""
Actividades a realizarse después del Setup, al finalizar la ejecución
normal de todas las actividades del script. De otra forma, esta función
debería de quedar vacía (sólo la sentencia 'pass').
"""
with cd(DEPLOY_PATH):
sudo('cp jenkins/sanvalentin-staging.conf /etc/nginx/conf.d/')
with settings(warn_only=True):
sudo('service nginx reload')
# Appendix: Fabric in a nutshell
#
# To run a command, use run('') and put the command inside the quotes. If a
# constant defined at the top of the script is needed, write '%s' where
# required and follow the closing quote with the % symbol and a list of the
# variables (as many as there are %s's, in order of appearance; an example of
# this usage can be seen in deploy_staging).
#
# If administrator privileges are needed to run the command, change 'run' to
# 'sudo'.
#
# If the command may fail and the script should not stop because of it, write
# 'with settings(warn_only=True):' and indent the instruction(s) that need to
# run this way one level.
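# A minimal, hypothetical sketch of the patterns described above; the task name
# and the commands are illustrative only and are not part of the deployment flow.
@task
def example_usage():
    """Hypothetical task demonstrating run/sudo/settings(warn_only=True)."""
    # Plain command, interpolating a constant defined at the top of the script.
    run('ls -la %s' % DEPLOY_PATH)
    # The same command, executed with administrator privileges.
    sudo('ls -la %s' % DEPLOY_PATH)
    # A command that may fail without stopping the whole script.
    with settings(warn_only=True):
        run('test -f %s/missing-file' % DEPLOY_PATH)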
|
bsd-3-clause
|
dalanlan/calico-docker
|
calico_containers/tests/unit/endpoint_test.py
|
9
|
1335
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
from nose_parameterized import parameterized
from calico_ctl import endpoint
class TestEndpoint(unittest.TestCase):
@parameterized.expand([
({'<PROFILES>':['profile-1', 'profile-2', 'profile-3']}, False),
({'<PROFILES>':['Profile1', 'Profile!']}, True),
({}, False)
])
def test_validate_arguments(self, case, sys_exit_called):
"""
Test validate_arguments for calicoctl endpoint command
"""
with patch('sys.exit', autospec=True) as m_sys_exit:
# Call method under test
endpoint.validate_arguments(case)
# Assert method exits if bad input
self.assertEqual(m_sys_exit.called, sys_exit_called)
|
apache-2.0
|
ajose01/rethinkdb
|
external/v8_3.30.33.16/tools/push-to-trunk/script_test.py
|
95
|
2294
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Wraps test execution with a coverage analysis. To get the best speed, the
# native python coverage version >= 3.7.1 should be installed.
import coverage
import os
import unittest
import sys
def Main(argv):
script_path = os.path.dirname(os.path.abspath(__file__))
cov = coverage.coverage(include=([os.path.join(script_path, '*.py')]))
cov.start()
import test_scripts
alltests = map(unittest.TestLoader().loadTestsFromTestCase, [
test_scripts.ToplevelTest,
test_scripts.ScriptTest,
test_scripts.SystemTest,
])
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests))
cov.stop()
print cov.report()
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
agpl-3.0
|
soldag/home-assistant
|
homeassistant/components/mqtt/light/schema_template.py
|
3
|
17037
|
"""Support for MQTT Template lights."""
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.components.mqtt import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_UNIQUE_ID,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.color as color_util
from ..debug_info import log_messages
from .schema import MQTT_LIGHT_SCHEMA_SCHEMA
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt_template"
DEFAULT_NAME = "MQTT Template Light"
DEFAULT_OPTIMISTIC = False
CONF_BLUE_TEMPLATE = "blue_template"
CONF_BRIGHTNESS_TEMPLATE = "brightness_template"
CONF_COLOR_TEMP_TEMPLATE = "color_temp_template"
CONF_COMMAND_OFF_TEMPLATE = "command_off_template"
CONF_COMMAND_ON_TEMPLATE = "command_on_template"
CONF_EFFECT_LIST = "effect_list"
CONF_EFFECT_TEMPLATE = "effect_template"
CONF_GREEN_TEMPLATE = "green_template"
CONF_MAX_MIREDS = "max_mireds"
CONF_MIN_MIREDS = "min_mireds"
CONF_RED_TEMPLATE = "red_template"
CONF_STATE_TEMPLATE = "state_template"
CONF_WHITE_VALUE_TEMPLATE = "white_value_template"
PLATFORM_SCHEMA_TEMPLATE = (
mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
vol.Optional(CONF_MAX_MIREDS): cv.positive_int,
vol.Optional(CONF_MIN_MIREDS): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_RED_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
.extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
)
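# Illustrative only (not part of the original module): a minimal configuration
# payload, expressed as a Python dict, of the kind the schema above is meant to
# validate. Topic names and templates are hypothetical.
_EXAMPLE_TEMPLATE_LIGHT_CONFIG = {
    "name": "Office light",
    "command_topic": "office/light/set",
    "state_topic": "office/light/state",
    "command_on_template": "on,{{ brightness|d }}",
    "command_off_template": "off",
    "state_template": "{{ value.split(',')[0] }}",
    "brightness_template": "{{ value.split(',')[1] }}",
}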
async def async_setup_entity_template(
hass, config, async_add_entities, config_entry, discovery_data
):
"""Set up a MQTT Template light."""
async_add_entities([MqttLightTemplate(config, config_entry, discovery_data)])
class MqttLightTemplate(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
LightEntity,
RestoreEntity,
):
"""Representation of a MQTT Template light."""
def __init__(self, config, config_entry, discovery_data):
"""Initialize a MQTT Template light."""
self._state = False
self._sub_state = None
self._topics = None
self._templates = None
self._optimistic = False
# features
self._brightness = None
self._color_temp = None
self._white_value = None
self._hs = None
self._effect = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_TEMPLATE(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topics = {
key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC)
}
self._templates = {
key: config.get(key)
for key in (
CONF_BLUE_TEMPLATE,
CONF_BRIGHTNESS_TEMPLATE,
CONF_COLOR_TEMP_TEMPLATE,
CONF_COMMAND_OFF_TEMPLATE,
CONF_COMMAND_ON_TEMPLATE,
CONF_EFFECT_TEMPLATE,
CONF_GREEN_TEMPLATE,
CONF_RED_TEMPLATE,
CONF_STATE_TEMPLATE,
CONF_WHITE_VALUE_TEMPLATE,
)
}
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = (
optimistic
or self._topics[CONF_STATE_TOPIC] is None
or self._templates[CONF_STATE_TEMPLATE] is None
)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = self.hass
last_state = await self.async_get_last_state()
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
"""Handle new MQTT messages."""
state = self._templates[
CONF_STATE_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
if state == STATE_ON:
self._state = True
elif state == STATE_OFF:
self._state = False
else:
_LOGGER.warning("Invalid state value received")
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
try:
self._brightness = int(
self._templates[
CONF_BRIGHTNESS_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
except ValueError:
_LOGGER.warning("Invalid brightness value received")
if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
try:
self._color_temp = int(
self._templates[
CONF_COLOR_TEMP_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
except ValueError:
_LOGGER.warning("Invalid color temperature value received")
if (
self._templates[CONF_RED_TEMPLATE] is not None
and self._templates[CONF_GREEN_TEMPLATE] is not None
and self._templates[CONF_BLUE_TEMPLATE] is not None
):
try:
red = int(
self._templates[
CONF_RED_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
green = int(
self._templates[
CONF_GREEN_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
blue = int(
self._templates[
CONF_BLUE_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
self._hs = color_util.color_RGB_to_hs(red, green, blue)
except ValueError:
_LOGGER.warning("Invalid color value received")
if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
try:
self._white_value = int(
self._templates[
CONF_WHITE_VALUE_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
except ValueError:
_LOGGER.warning("Invalid white value received")
if self._templates[CONF_EFFECT_TEMPLATE] is not None:
effect = self._templates[
CONF_EFFECT_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
if effect in self._config.get(CONF_EFFECT_LIST):
self._effect = effect
else:
_LOGGER.warning("Unsupported effect value received")
self.async_write_ha_state()
if self._topics[CONF_STATE_TOPIC] is not None:
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._topics[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
}
},
)
if self._optimistic and last_state:
self._state = last_state.state == STATE_ON
if last_state.attributes.get(ATTR_BRIGHTNESS):
self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
if last_state.attributes.get(ATTR_HS_COLOR):
self._hs = last_state.attributes.get(ATTR_HS_COLOR)
if last_state.attributes.get(ATTR_COLOR_TEMP):
self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
if last_state.attributes.get(ATTR_EFFECT):
self._effect = last_state.attributes.get(ATTR_EFFECT)
if last_state.attributes.get(ATTR_WHITE_VALUE):
self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._config.get(CONF_MIN_MIREDS, super().min_mireds)
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._config.get(CONF_MAX_MIREDS, super().max_mireds)
@property
def hs_color(self):
"""Return the hs color value [int, int]."""
return self._hs
@property
def white_value(self):
"""Return the white property."""
return self._white_value
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def name(self):
"""Return the name of the entity."""
return self._config[CONF_NAME]
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def is_on(self):
"""Return True if entity is on."""
return self._state
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return self._optimistic
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._config.get(CONF_EFFECT_LIST)
@property
def effect(self):
"""Return the current effect."""
return self._effect
async def async_turn_on(self, **kwargs):
"""Turn the entity on.
This method is a coroutine.
"""
values = {"state": True}
if self._optimistic:
self._state = True
if ATTR_BRIGHTNESS in kwargs:
values["brightness"] = int(kwargs[ATTR_BRIGHTNESS])
if self._optimistic:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_COLOR_TEMP in kwargs:
values["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
if self._optimistic:
self._color_temp = kwargs[ATTR_COLOR_TEMP]
if ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
# If there's a brightness topic set, we don't want to scale the RGB
# values given using the brightness.
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
brightness = 255
else:
brightness = kwargs.get(
ATTR_BRIGHTNESS,
self._brightness if self._brightness is not None else 255,
)
rgb = color_util.color_hsv_to_RGB(
hs_color[0], hs_color[1], brightness / 255 * 100
)
values["red"] = rgb[0]
values["green"] = rgb[1]
values["blue"] = rgb[2]
if self._optimistic:
self._hs = kwargs[ATTR_HS_COLOR]
if ATTR_WHITE_VALUE in kwargs:
values["white_value"] = int(kwargs[ATTR_WHITE_VALUE])
if self._optimistic:
self._white_value = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
values["effect"] = kwargs.get(ATTR_EFFECT)
if self._optimistic:
self._effect = kwargs[ATTR_EFFECT]
if ATTR_FLASH in kwargs:
values["flash"] = kwargs.get(ATTR_FLASH)
if ATTR_TRANSITION in kwargs:
values["transition"] = int(kwargs[ATTR_TRANSITION])
mqtt.async_publish(
self.hass,
self._topics[CONF_COMMAND_TOPIC],
self._templates[CONF_COMMAND_ON_TEMPLATE].async_render(
parse_result=False, **values
),
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off.
This method is a coroutine.
"""
values = {"state": False}
if self._optimistic:
self._state = False
if ATTR_TRANSITION in kwargs:
values["transition"] = int(kwargs[ATTR_TRANSITION])
mqtt.async_publish(
self.hass,
self._topics[CONF_COMMAND_TOPIC],
self._templates[CONF_COMMAND_OFF_TEMPLATE].async_render(
parse_result=False, **values
),
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self.async_write_ha_state()
@property
def supported_features(self):
"""Flag supported features."""
features = SUPPORT_FLASH | SUPPORT_TRANSITION
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
features = features | SUPPORT_BRIGHTNESS
if (
self._templates[CONF_RED_TEMPLATE] is not None
and self._templates[CONF_GREEN_TEMPLATE] is not None
and self._templates[CONF_BLUE_TEMPLATE] is not None
):
features = features | SUPPORT_COLOR
if self._config.get(CONF_EFFECT_LIST) is not None:
features = features | SUPPORT_EFFECT
if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
features = features | SUPPORT_COLOR_TEMP
if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
features = features | SUPPORT_WHITE_VALUE
return features
|
apache-2.0
|
larsks/cobbler-larsks
|
koan/text_wrap.py
|
21
|
13867
|
"""Text wrapping and filling.
(note: repackaged in koan because it's not present in RHEL3)
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
__revision__ = "$Id: textwrap.py,v 1.32.8.2 2004/05/13 01:48:15 gward Exp $"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils).
try:
True, False
except NameError:
(True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(r'(\s+|' # any whitespace
r'-*\w{2,}-(?=\w{2,})|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# XXX will there be a locale-or-charset-aware version of
# string.lowercase in 2.3?
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
"""
chunks = self.wordsep_re.split(text)
chunks = filter(None, chunks)
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
space_left = max(width - cur_len, 1)
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(chunks[0][0:space_left])
chunks[0] = chunks[0][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(chunks.pop(0))
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if chunks[0].strip() == '' and lines:
del chunks[0]
while chunks:
l = len(chunks[0])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop(0))
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[0]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
indent = self.initial_indent
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
def dedent(text):
"""dedent(text : string) -> string
    Remove any whitespace that can be uniformly removed from the left
of every line in `text`.
This can be used e.g. to make triple-quoted strings line up with
the left edge of screen/whatever, while still presenting it in the
source code in indented form.
For example:
def test():
# end first line with \ to avoid the empty line!
s = '''\
hello
world
'''
print repr(s) # prints ' hello\n world\n '
print repr(dedent(s)) # prints 'hello\n world\n'
"""
lines = text.expandtabs().split('\n')
margin = None
for line in lines:
content = line.lstrip()
if not content:
continue
indent = len(line) - len(content)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if margin is not None and margin > 0:
for i in range(len(lines)):
lines[i] = lines[i][margin:]
return '\n'.join(lines)
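# Illustrative usage sketch (not part of the original module); the sample text
# below is hypothetical.
def _demo():
    sample = ("The quick brown fox jumped over the lazy dog and then, "
              "for good measure, did it again.")
    # Wrap into a list of lines no wider than 30 columns.
    for line in wrap(sample, width=30):
        print line
    # Fill returns the wrapped paragraph as a single string, with indents.
    print fill(sample, width=30, initial_indent="* ", subsequent_indent="  ")
    # Dedent strips the common leading whitespace from every line.
    print dedent("    hello\n      world\n")
if __name__ == '__main__':
    _demo()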
|
gpl-2.0
|
rockneurotiko/django
|
django/db/models/query.py
|
7
|
67989
|
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField
from django.db.models.query_utils import (
Q, InvalidQuery, check_rel_lookup_compatibility, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterator(object):
def __init__(self, queryset):
self.queryset = queryset
class ModelIterator(BaseIterator):
"""
Iterator that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
if klass_info is None:
return
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set]
model_cls = deferred_class_factory(model_cls, skip)
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterator(BaseIterator):
"""
Iterator returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterator(BaseIterator):
"""
    Iterator returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterator(BaseIterator):
"""
    Iterator returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
self._iterator_class = ModelIterator
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled queryset instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
- Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
- Returns one row at time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return self._iterator_class(self)
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
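# Illustrative usage of aggregate() (hedged example, not part of Django;
# ``Book`` is a hypothetical model with a ``price`` field). Positional
# expressions are keyed by their default alias, keyword expressions by the
# given name:
#
#     >>> from django.db.models import Avg, Max
#     >>> Book.objects.aggregate(Avg('price'))
#     {'price__avg': 34.35}
#     >>> Book.objects.aggregate(max_price=Max('price'))
#     {'max_price': 81.2}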
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field. Multi-table models are not supported.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which reference this. There are two workarounds: 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. Some databases might allow doing
# this by using a RETURNING clause for the insert query. We're punting
# on these for now because they are relatively rare cases.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size)
return objs
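# Illustrative usage of bulk_create() (hedged example, not part of Django;
# ``Entry`` is a hypothetical model). The objects are inserted in batches
# inside a single transaction, without calling save() or sending signals:
#
#     >>> Entry.objects.bulk_create(
#     ...     [Entry(headline='A'), Entry(headline='B')],
#     ...     batch_size=100,
#     ... )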
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
obj = self.get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v)
with transaction.atomic(using=self.db, savepoint=False):
obj.save(using=self.db)
return obj, False
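# Illustrative usage of get_or_create()/update_or_create() (hedged example,
# not part of Django; ``Person`` is a hypothetical model). The lookup kwargs
# identify the row and ``defaults`` supplies the values used for creation
# (or, for update_or_create(), for the update):
#
#     >>> from datetime import date
#     >>> person, created = Person.objects.get_or_create(
#     ...     first_name='John', last_name='Lennon',
#     ...     defaults={'birthday': date(1940, 10, 9)},
#     ... )
#     >>> person, created = Person.objects.update_or_create(
#     ...     first_name='John', last_name='Lennon',
#     ...     defaults={'first_name': 'Bob'},
#     ... )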
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create() and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, or None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, or None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
def _values(self, *fields):
clone = self._clone()
clone._fields = fields
query = clone.query
query.select_related = False
query.clear_deferred_loading()
query.clear_select_fields()
if query.group_by is True:
query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
query.set_group_by()
query.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not query._extra and not query._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
query.default_cols = False
for f in fields:
if f in query.extra_select:
extra_names.append(f)
elif f in query.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
query.set_extra_mask(extra_names)
query.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
query.values_select = field_names
query.add_fields(field_names, True)
return clone
def values(self, *fields):
clone = self._values(*fields)
clone._iterator_class = ValuesIterator
return clone
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self._values(*fields)
clone._iterator_class = FlatValuesListIterator if flat else ValuesListIterator
return clone
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Date(field_name, kind),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=DateTime(field_name, kind, tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
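# Illustrative usage of dates()/datetimes() (hedged example, not part of
# Django; ``Entry`` is a hypothetical model with a ``pub_date`` field). Both
# return distinct values truncated to the requested granularity:
#
#     >>> Entry.objects.dates('pub_date', 'month')
#     [datetime.date(2015, 1, 1), datetime.date(2015, 2, 1), ...]
#     >>> Entry.objects.datetimes('pub_date', 'hour', order='DESC')
#     [datetime.datetime(2015, 2, 1, 13, 0), ...]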
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
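# Illustrative usage of select_related()/prefetch_related() (hedged example,
# not part of Django; ``Entry`` and its ``blog``/``comments`` relations are
# hypothetical). select_related() follows foreign keys in the same query via
# a JOIN; prefetch_related() issues one extra query per lookup and joins the
# results in Python:
#
#     >>> Entry.objects.select_related('blog')
#     >>> Entry.objects.prefetch_related('comments', 'comments__author')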
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
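# Illustrative usage of annotate() (hedged example, not part of Django;
# ``Blog`` is a hypothetical model with an ``entries`` relation). Aggregate
# annotations trigger the GROUP BY handling above:
#
#     >>> from django.db.models import Count
#     >>> blogs = Blog.objects.annotate(n_entries=Count('entries'))
#     >>> blogs[0].n_entries
#     42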
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
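# Illustrative usage of defer()/only() (hedged example, not part of Django;
# ``Entry`` is a hypothetical model). Deferred fields are loaded lazily on
# first access:
#
#     >>> Entry.objects.defer('body')      # load everything except body
#     >>> Entry.objects.only('headline')   # load only headline (and the pk)
#     >>> Entry.objects.defer(None)        # reset all deferrals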
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
A helper method for bulk_create() to insert the objects one batch at
a time. Splits the objects into batches of at most ``batch_size`` and
inserts each batch with a separate query.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i + batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterator_class = self._iterator_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare(self):
if self._fields is not None:
# A values() queryset can only be used as a nested query
# if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
if self._fields is not None:
# A values() queryset can only be used as a nested query
# if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
clone = self._clone()
else:
clone = self.values('pk')
if clone._db is None or connection == connections[clone._db]:
return clone.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
isn't equivalent to checking whether all objects are present in the
results; for example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
Check that using this queryset as the rhs value for a lookup is
allowed. The opts are the options of the relation's target we are
querying against. For example in .filter(author__in=Author.objects.all())
the opts would be Author's (from the author field) and self.model would
be Author.objects.all() queryset's .model (Author also). The field is
the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_through(self, level):
return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
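# Illustrative usage of Prefetch objects (hedged example, not part of Django;
# ``Question`` and ``Choice`` are hypothetical models). A Prefetch can
# customise the queryset used for a lookup and store the result on a custom
# attribute via to_attr:
#
#     >>> Question.objects.prefetch_related(
#     ...     Prefetch('choice_set',
#     ...              queryset=Choice.objects.filter(is_active=True),
#     ...              to_attr='active_choices'),
#     ... )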
def normalize_prefetch_lookups(lookups, prefix=None):
"""
Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(result_cache, related_lookups):
"""
Helper function for prefetch_related functionality.
Populates the prefetched object caches for a list of results
from a QuerySet.
"""
if len(result_cache) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Runs prefetches on all instances using the prefetcher object,
assigning results to the relevant caches on each instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', [])
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
to_attr, as_attr = lookup.get_current_to_attr(level)
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
# Cache in the QuerySet.all().
qs = getattr(obj, to_attr).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in the populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def get_deferred_cls(self, klass_info, init_list):
model_cls = klass_info['model']
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [
f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set
]
model_cls = deferred_class_factory(model_cls, skip)
return model_cls
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
bsd-3-clause
|
googleapis/googleapis-gen
|
google/firestore/v1beta1/firestore-v1beta1-py/google/firestore_v1beta1/services/firestore/pagers.py
|
1
|
15845
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.firestore_v1beta1.types import document
from google.firestore_v1beta1.types import firestore
from google.firestore_v1beta1.types import query
class ListDocumentsPager:
"""A pager for iterating through ``list_documents`` requests.
This class thinly wraps an initial
:class:`google.firestore_v1beta1.types.ListDocumentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``documents`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListDocuments`` requests and continue to iterate
through the ``documents`` field on the
corresponding responses.
All the usual :class:`google.firestore_v1beta1.types.ListDocumentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., firestore.ListDocumentsResponse],
request: firestore.ListDocumentsRequest,
response: firestore.ListDocumentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.firestore_v1beta1.types.ListDocumentsRequest):
The initial request object.
response (google.firestore_v1beta1.types.ListDocumentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = firestore.ListDocumentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[firestore.ListDocumentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[document.Document]:
for page in self.pages:
yield from page.documents
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
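# Usage sketch for the synchronous pager (illustrative only; assumes a
# configured Firestore client whose list_documents() call returns this pager;
# the client wiring and the ``parent`` path are not part of this module):
#
#     >>> pager = client.list_documents(request={"parent": parent, "collection_id": "users"})
#     >>> for doc in pager:            # __iter__ fetches further pages transparently
#     ...     print(doc.name)
#     >>> for page in pager.pages:     # or iterate page by page
#     ...     print(len(page.documents))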
class ListDocumentsAsyncPager:
"""A pager for iterating through ``list_documents`` requests.
This class thinly wraps an initial
:class:`google.firestore_v1beta1.types.ListDocumentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``documents`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListDocuments`` requests and continue to iterate
through the ``documents`` field on the
corresponding responses.
All the usual :class:`google.firestore_v1beta1.types.ListDocumentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[firestore.ListDocumentsResponse]],
request: firestore.ListDocumentsRequest,
response: firestore.ListDocumentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.firestore_v1beta1.types.ListDocumentsRequest):
The initial request object.
response (google.firestore_v1beta1.types.ListDocumentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = firestore.ListDocumentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[firestore.ListDocumentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[document.Document]:
async def async_generator():
async for page in self.pages:
for response in page.documents:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class PartitionQueryPager:
"""A pager for iterating through ``partition_query`` requests.
This class thinly wraps an initial
:class:`google.firestore_v1beta1.types.PartitionQueryResponse` object, and
provides an ``__iter__`` method to iterate through its
``partitions`` field.
If there are more pages, the ``__iter__`` method will make additional
``PartitionQuery`` requests and continue to iterate
through the ``partitions`` field on the
corresponding responses.
All the usual :class:`google.firestore_v1beta1.types.PartitionQueryResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., firestore.PartitionQueryResponse],
request: firestore.PartitionQueryRequest,
response: firestore.PartitionQueryResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.firestore_v1beta1.types.PartitionQueryRequest):
The initial request object.
response (google.firestore_v1beta1.types.PartitionQueryResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = firestore.PartitionQueryRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[firestore.PartitionQueryResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[query.Cursor]:
for page in self.pages:
yield from page.partitions
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class PartitionQueryAsyncPager:
"""A pager for iterating through ``partition_query`` requests.
This class thinly wraps an initial
:class:`google.firestore_v1beta1.types.PartitionQueryResponse` object, and
provides an ``__aiter__`` method to iterate through its
``partitions`` field.
If there are more pages, the ``__aiter__`` method will make additional
``PartitionQuery`` requests and continue to iterate
through the ``partitions`` field on the
corresponding responses.
All the usual :class:`google.firestore_v1beta1.types.PartitionQueryResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[firestore.PartitionQueryResponse]],
request: firestore.PartitionQueryRequest,
response: firestore.PartitionQueryResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.firestore_v1beta1.types.PartitionQueryRequest):
The initial request object.
response (google.firestore_v1beta1.types.PartitionQueryResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = firestore.PartitionQueryRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[firestore.PartitionQueryResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[query.Cursor]:
async def async_generator():
async for page in self.pages:
for response in page.partitions:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListCollectionIdsPager:
"""A pager for iterating through ``list_collection_ids`` requests.
This class thinly wraps an initial
:class:`google.firestore_v1beta1.types.ListCollectionIdsResponse` object, and
provides an ``__iter__`` method to iterate through its
``collection_ids`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListCollectionIds`` requests and continue to iterate
through the ``collection_ids`` field on the
corresponding responses.
All the usual :class:`google.firestore_v1beta1.types.ListCollectionIdsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., firestore.ListCollectionIdsResponse],
request: firestore.ListCollectionIdsRequest,
response: firestore.ListCollectionIdsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.firestore_v1beta1.types.ListCollectionIdsRequest):
The initial request object.
response (google.firestore_v1beta1.types.ListCollectionIdsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = firestore.ListCollectionIdsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[firestore.ListCollectionIdsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[str]:
for page in self.pages:
yield from page.collection_ids
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListCollectionIdsAsyncPager:
"""A pager for iterating through ``list_collection_ids`` requests.
This class thinly wraps an initial
:class:`google.firestore_v1beta1.types.ListCollectionIdsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``collection_ids`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListCollectionIds`` requests and continue to iterate
through the ``collection_ids`` field on the
corresponding responses.
All the usual :class:`google.firestore_v1beta1.types.ListCollectionIdsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[firestore.ListCollectionIdsResponse]],
request: firestore.ListCollectionIdsRequest,
response: firestore.ListCollectionIdsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.firestore_v1beta1.types.ListCollectionIdsRequest):
The initial request object.
response (google.firestore_v1beta1.types.ListCollectionIdsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = firestore.ListCollectionIdsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[firestore.ListCollectionIdsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[str]:
async def async_generator():
async for page in self.pages:
for response in page.collection_ids:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
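# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): the pagers above
# fetch further pages by re-issuing the request with the previous response's
# ``next_page_token``. The stand-in page class and method below are
# assumptions used purely to illustrate that loop without a live Firestore
# backend or the real request/response protos.
if __name__ == "__main__":
    class _FakePage:
        def __init__(self, collection_ids, next_page_token=""):
            self.collection_ids = collection_ids
            self.next_page_token = next_page_token

    _remaining = [_FakePage(["orders"])]

    def _fake_method(request, metadata=()):
        # A real method would send ``request`` to Firestore; here we simply
        # return the next stored page.
        return _remaining.pop(0)

    response = _FakePage(["users"], next_page_token="token-1")
    collection_ids = list(response.collection_ids)
    while response.next_page_token:  # same loop as the ``pages`` properties
        response = _fake_method(request=None)
        collection_ids.extend(response.collection_ids)
    print(collection_ids)  # ['users', 'orders']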
|
apache-2.0
|
jotes/pontoon
|
pontoon/machinery/views.py
|
1
|
12755
|
import json
import logging
import requests
import xml.etree.ElementTree as ET
from urllib.parse import quote
from caighdean import Translator
from caighdean.exceptions import TranslationError
from uuid import uuid4
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import render
from django.template.loader import get_template
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.html import escape
from pontoon.base import utils
from pontoon.base.models import Entity, Locale, Translation
from pontoon.machinery.utils import (
get_concordance_search_data,
get_google_translate_data,
get_translation_memory_data,
)
log = logging.getLogger(__name__)
def machinery(request):
locale = utils.get_project_locale_from_request(request, Locale.objects) or "en-GB"
return render(
request,
"machinery/machinery.html",
{
"locale": Locale.objects.get(code=locale),
"locales": Locale.objects.all(),
"is_google_translate_supported": bool(settings.GOOGLE_TRANSLATE_API_KEY),
"is_microsoft_translator_supported": bool(
settings.MICROSOFT_TRANSLATOR_API_KEY
),
"is_systran_translate_supported": bool(settings.SYSTRAN_TRANSLATE_API_KEY),
},
)
def translation_memory(request):
"""Get translations from internal translations memory."""
try:
text = request.GET["text"]
locale = request.GET["locale"]
pk = int(request.GET["pk"])
except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
try:
locale = Locale.objects.get(code=locale)
except Locale.DoesNotExist as e:
return JsonResponse(
{"status": False, "message": "Not Found: {error}".format(error=e)},
status=404,
)
data = get_translation_memory_data(text, locale, pk)
return JsonResponse(data, safe=False)
def concordance_search(request):
"""Search for translations in the internal translations memory."""
try:
text = request.GET["text"]
locale = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
try:
locale = Locale.objects.get(code=locale)
except Locale.DoesNotExist as e:
return JsonResponse(
{"status": False, "message": "Not Found: {error}".format(error=e)},
status=404,
)
data = get_concordance_search_data(text, locale)
return JsonResponse(data, safe=False)
def microsoft_translator(request):
"""Get translation from Microsoft machine translation service."""
try:
text = request.GET["text"]
locale_code = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
api_key = settings.MICROSOFT_TRANSLATOR_API_KEY
if not api_key:
log.error("MICROSOFT_TRANSLATOR_API_KEY not set")
return JsonResponse(
{"status": False, "message": "Bad Request: Missing api key."}, status=400
)
# Validate if locale exists in the database to avoid any potential XSS attacks.
if not Locale.objects.filter(ms_translator_code=locale_code).exists():
return JsonResponse(
{
"status": False,
"message": "Not Found: {error}".format(error=locale_code),
},
status=404,
)
url = "https://api.cognitive.microsofttranslator.com/translate"
headers = {"Ocp-Apim-Subscription-Key": api_key, "Content-Type": "application/json"}
payload = {
"api-version": "3.0",
"from": "en",
"to": locale_code,
"textType": "html",
}
body = [{"Text": text}]
try:
r = requests.post(url, params=payload, headers=headers, json=body)
root = json.loads(r.content)
if "error" in root:
log.error("Microsoft Translator error: {error}".format(error=root))
return JsonResponse(
{
"status": False,
"message": "Bad Request: {error}".format(error=root),
},
status=400,
)
return JsonResponse({"translation": root[0]["translations"][0]["text"]})
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
def google_translate(request):
"""Get translation from Google machine translation service."""
try:
text = request.GET["text"]
locale_code = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
# Validate if locale exists in the database to avoid any potential XSS attacks.
if not Locale.objects.filter(google_translate_code=locale_code).exists():
return JsonResponse(
{
"status": False,
"message": "Not Found: {error}".format(error=locale_code),
},
status=404,
)
data = get_google_translate_data(text, locale_code)
if not data["status"]:
return JsonResponse(data, status=400)
return JsonResponse(data)
def systran_translate(request):
"""Get translations from SYSTRAN machine translation service."""
try:
text = request.GET["text"]
locale_code = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
api_key = settings.SYSTRAN_TRANSLATE_API_KEY
if not api_key:
log.error("SYSTRAN_TRANSLATE_API_KEY not set")
return JsonResponse(
{"status": False, "message": "Bad Request: Missing api key."}, status=400
)
# Validate if locale exists in the database to avoid any potential XSS attacks.
try:
locale = Locale.objects.get(systran_translate_code=locale_code)
except Locale.DoesNotExist:
return JsonResponse(
{
"status": False,
"message": "Not Found: {error}".format(error=locale_code),
},
status=404,
)
url = (
"https://translationpartners-spn9.mysystran.com:8904/translation/text/translate"
)
payload = {
"key": api_key,
"input": text,
"source": "en",
"target": locale_code,
"profile": locale.systran_translate_profile,
"format": "text",
}
try:
r = requests.post(url, params=payload)
root = json.loads(r.content)
if "error" in root:
log.error("SYSTRAN error: {error}".format(error=root))
return JsonResponse(
{
"status": False,
"message": "Bad Request: {error}".format(error=root),
},
status=400,
)
return JsonResponse({"translation": root["outputs"][0]["output"]})
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
def caighdean(request):
"""Get translation from Caighdean machine translation service."""
try:
entityid = int(request.GET["id"])
except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
try:
entity = Entity.objects.get(id=entityid)
except Entity.DoesNotExist as e:
return JsonResponse(
{"status": False, "message": "Not Found: {error}".format(error=e)},
status=404,
)
try:
text = entity.translation_set.get(
locale__code="gd",
plural_form=None if entity.string_plural == "" else 0,
approved=True,
).string
except Translation.DoesNotExist:
return JsonResponse({})
try:
translation = Translator().translate(text)
return JsonResponse({"original": text, "translation": translation})
except TranslationError as e:
return JsonResponse(
{"status": False, "message": "Server Error: {error}".format(error=e)},
status=500,
)
def microsoft_terminology(request):
"""Get translations from Microsoft Terminology Service."""
try:
text = request.GET["text"]
locale_code = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
# Validate if locale exists in the database to avoid any potential XSS attacks.
if not Locale.objects.filter(ms_terminology_code=locale_code).exists():
return JsonResponse(
{
"status": False,
"message": "Not Found: {error}".format(error=locale_code),
},
status=404,
)
obj = {}
url = "http://api.terminology.microsoft.com/Terminology.svc"
headers = {
"SOAPAction": (
'"http://api.terminology.microsoft.com/terminology/Terminology/GetTranslations"'
),
"Content-Type": "text/xml; charset=utf-8",
}
payload = {
"uuid": uuid4(),
"text": quote(text.encode("utf-8")),
"to": locale_code,
"max_result": 5,
}
template = get_template("machinery/microsoft_terminology.jinja")
payload = template.render(payload)
try:
r = requests.post(url, data=payload, headers=headers)
translations = []
xpath = ".//{http://api.terminology.microsoft.com/terminology}"
root = ET.fromstring(r.content)
results = root.find(xpath + "GetTranslationsResult")
if results is not None:
for translation in results:
translations.append(
{
"source": translation.find(xpath + "OriginalText").text,
"target": translation.find(xpath + "TranslatedText").text,
"quality": int(
translation.find(xpath + "ConfidenceLevel").text
),
}
)
obj["translations"] = translations
return JsonResponse(obj)
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
def transvision(request):
"""Get Mozilla translations from Transvision service."""
try:
text = request.GET["text"]
locale = request.GET["locale"]
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
try:
text = quote(text.encode("utf-8"))
except KeyError as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
url = u"https://transvision.mozfr.org/api/v1/tm/global/en-US/{locale}/{text}/".format(
locale=locale, text=text
)
payload = {
"max_results": 5,
"min_quality": 70,
}
try:
r = requests.get(url, params=payload)
if "error" in r.json():
error = r.json()["error"]
log.error("Transvision error: {error}".format(error=error))
error = escape(error)
return JsonResponse(
{
"status": False,
"message": "Bad Request: {error}".format(error=error),
},
status=400,
)
return JsonResponse(r.json(), safe=False)
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": "Bad Request: {error}".format(error=e)},
status=400,
)
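# ---------------------------------------------------------------------------
# Hedged usage sketch: every view above reads its parameters from GET query
# arguments and replies with JSON (HTTP 400 for missing/invalid parameters,
# 404 for unknown locales). The URL path below is an illustrative assumption;
# the real routes live in the project's urls.py, which is not shown here.
if __name__ == "__main__":
    import requests as _requests

    _resp = _requests.get(
        "http://localhost:8000/translation-memory/",  # hypothetical route
        params={"text": "Save file", "locale": "de", "pk": 42},
    )
    print(_resp.status_code, _resp.json())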
|
bsd-3-clause
|
mdileep/mutatorMath.gui
|
Flask/PythonWeb/views.py
|
2
|
1674
|
from flask import render_template,Response
from PythonWeb import app
from PythonWeb import lib
from PythonWeb import business
from datetime import datetime
def renderTemplate(htmlPage,navActive):
return render_template(htmlPage, now=datetime.now(),sessionId=lib.getNewSessionId(),active=navActive)
@app.route('/gui')
@app.route('/GUI')
def appGUIPage():
return renderTemplate('gui.html','gui')
@app.route('/')
@app.route('/about')
@app.route('/About')
def appHomePage():
return renderTemplate('about.html','about')
@app.route('/contact')
@app.route('/Contact')
def appContactPage():
return renderTemplate('contact.html','contact')
@app.route('/license')
@app.route('/License')
def appLicensePage():
return renderTemplate('license.html','license')
@app.route('/Env')
@app.route('/env')
def appEnv():
return business.showEnvDetails()
@app.route('/Uploader')
@app.route('/uploader')
def appUploader():
return render_template('uploader.html')
@app.route('/Upload.zip', methods=['POST'])
@app.route('/upload.zip', methods=['POST'])
def appUploadZip():
return business.upload(['zip'])
@app.route('/Download/<filename>')
@app.route('/download/<filename>')
def appGetFile(filename):
return business.sendFile(filename)
@app.route('/View/<filename>.designspace')
@app.route('/view/<filename>.designspace')
def appShowXmlFile(filename):
return business.showXmlFile(filename)
@app.route('/View/<filename>.log')
@app.route('/view/<filename>.log')
def appShowFile(filename):
return business.showLogFile(filename)
@app.route('/Run', methods=['POST'])
@app.route('/run', methods=['POST'])
def appGo():
return business.run()
|
gpl-3.0
|
noodle-learns-programming/python-social-auth
|
social/apps/pyramid_app/views.py
|
75
|
1091
|
from pyramid.view import view_config
from social.utils import module_member
from social.actions import do_auth, do_complete, do_disconnect
from social.apps.pyramid_app.utils import psa, login_required
@view_config(route_name='social.auth', request_method='GET')
@psa('social.complete')
def auth(request):
return do_auth(request.backend, redirect_name='next')
@view_config(route_name='social.complete', request_method=('GET', 'POST'))
@psa('social.complete')
def complete(request, *args, **kwargs):
do_login = module_member(request.backend.setting('LOGIN_FUNCTION'))
return do_complete(request.backend, do_login, request.user,
redirect_name='next', *args, **kwargs)
@view_config(route_name='social.disconnect', request_method=('POST',))
@view_config(route_name='social.disconnect_association',
request_method=('POST',))
@psa()
@login_required
def disconnect(request):
return do_disconnect(request.backend, request.user,
request.matchdict.get('association_id'),
redirect_name='next')
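# ---------------------------------------------------------------------------
# Hedged configuration sketch: only the route names are taken from the
# @view_config decorators above; the URL patterns and the ``includeme`` hook
# itself are illustrative assumptions about how an application might wire
# these views into a Pyramid Configurator.
def includeme(config):
    config.add_route('social.auth', '/login/{backend}')
    config.add_route('social.complete', '/complete/{backend}')
    config.add_route('social.disconnect', '/disconnect/{backend}')
    config.add_route('social.disconnect_association',
                     '/disconnect/{backend}/association/{association_id}')
    config.scan('social.apps.pyramid_app.views')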
|
bsd-3-clause
|
chengdh/openerp-ktv
|
openerp/pychart/area_doc.py
|
15
|
3581
|
# -*- coding: utf-8 -*-
# automatically generated by generate_docs.py.
doc="""Attributes supported by this class are:
plots(type:list) default="Used only internally by pychart.".
loc(type:(x,y)) default="The location of the bottom-left corner of the chart.
@cindex chart location
@cindex location, chart
".
y_grid_style(type:line_style.T) default="The style of vertical grid lines.".
y_grid_interval(type:Number or function) default="The vertical grid-line interval. See also x_grid_interval".
x_grid_over_plot(type:int) default="If True, grid lines are drawn over plots. Otherwise, plots are drawn over grid lines.".
x_range(type:(x,y)) default="Specifies the range of X values that are displayed in the
chart. If the value is None, both values are computed
automatically from the samples. Otherwise, the value must be a
tuple of format (MIN, MAX). MIN and MAX must be either None or a
number. If None, the value is computed automatically from the
samples. For example, if x_range = (None,5), then the minimum X
value is computed automatically, but the maximum X value is fixed
at 5.".
y_coord(type:coord.T) default="Set the Y coordinate system.".
A linear coordinate system.
y_range(type:(x,y)) default="Specifies the range of Y values that are displayed in the
chart. If the value is None, both values are computed
automatically from the samples. Otherwise, the value must be a
tuple of format (MIN, MAX). MIN and MAX must be either None or a
number. If None, the value is computed automatically from the
samples. For example, if y_range = (None,5), then the minimum Y
value is computed automatically, but the maximum Y value is fixed
at 5.".
x_axis(type:axis.X) default="The X axis. <<axis>>.".
bg_style(type:fill_style.T) default="Background fill-pattern.".
x_coord(type:coord.T) default="Set the X coordinate system.".
A linear coordinate system.
legend(type:legend.T) default="The legend of the chart.".
a legend is by default displayed in the right-center of the
chart.
y_grid_over_plot(type:int) default="See x_grid_over_plot.".
x_axis2(type:axis.X) default="The second X axis. This axis should be non-None either when you want to display plots with two distinct domains or when
you just want to display two axes at the top and bottom of the chart.
<<axis>>".
y_axis2(type:axis.Y) default="The second Y axis. This axis should be non-None either when you want to display plots with two distinct ranges or when
you just want to display two axes at the left and right of the chart. <<axis>>".
x_grid_style(type:line_style.T) default="The style of horizontal grid lines.
@cindex grid lines".
y_axis(type:axis.Y) default="The Y axis. <<axis>>.".
border_line_style(type:line_style.T) default="Line style of the outer frame of the chart.".
x_grid_interval(type:Number or function) default="The horizontal grid-line interval.
A numeric value
specifies the interval at which
lines are drawn. If value is a function, it
takes two arguments, (MIN, MAX), that tells
the minimum and maximum values found in the
sample data. The function should return a list
of values at which lines are drawn.".
size(type:(x,y)) default="The size of the chart-drawing area, excluding axis labels,
legends, tick marks, etc.
@cindex chart size
@cindex size, chart
".
"""
|
agpl-3.0
|
johankaito/fufuka
|
microblog/old-flask/lib/python2.7/site-packages/wtforms/widgets/html5.py
|
153
|
2214
|
"""
Widgets for various HTML5 input types.
"""
from .core import Input
__all__ = (
'ColorInput', 'DateInput', 'DateTimeInput', 'DateTimeLocalInput',
'EmailInput', 'MonthInput', 'NumberInput', 'RangeInput', 'SearchInput',
'TelInput', 'TimeInput', 'URLInput', 'WeekInput',
)
class SearchInput(Input):
"""
Renders an input with type "search".
"""
input_type = 'search'
class TelInput(Input):
"""
Renders an input with type "tel".
"""
input_type = 'tel'
class URLInput(Input):
"""
Renders an input with type "url".
"""
input_type = 'url'
class EmailInput(Input):
"""
Renders an input with type "email".
"""
input_type = 'email'
class DateTimeInput(Input):
"""
Renders an input with type "datetime".
"""
input_type = 'datetime'
class DateInput(Input):
"""
Renders an input with type "date".
"""
input_type = 'date'
class MonthInput(Input):
"""
Renders an input with type "month".
"""
input_type = 'month'
class WeekInput(Input):
"""
Renders an input with type "week".
"""
input_type = 'week'
class TimeInput(Input):
"""
Renders an input with type "time".
"""
input_type = 'time'
class DateTimeLocalInput(Input):
"""
Renders an input with type "datetime-local".
"""
input_type = 'datetime-local'
class NumberInput(Input):
"""
Renders an input with type "number".
"""
input_type = 'number'
def __init__(self, step=None):
self.step = step
def __call__(self, field, **kwargs):
if self.step is not None:
kwargs.setdefault('step', self.step)
return super(NumberInput, self).__call__(field, **kwargs)
class RangeInput(Input):
"""
Renders an input with type "range".
"""
input_type = 'range'
def __init__(self, step=None):
self.step = step
def __call__(self, field, **kwargs):
if self.step is not None:
kwargs.setdefault('step', self.step)
return super(RangeInput, self).__call__(field, **kwargs)
class ColorInput(Input):
"""
Renders an input with type "color".
"""
input_type = 'color'
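# ---------------------------------------------------------------------------
# Hedged usage sketch (assumes the standard WTForms Form/DecimalField API; the
# form and field names are illustrative). NumberInput and RangeInput simply
# forward ``step`` to the rendered <input> element via setdefault above.
if __name__ == '__main__':
    from wtforms import Form, DecimalField

    class PriceForm(Form):
        price = DecimalField('Price', widget=NumberInput(step='0.01'))

    # Renders something like:
    # <input id="price" name="price" step="0.01" type="number" value="">
    print(PriceForm().price())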
|
apache-2.0
|
aldian/tensorflow
|
tensorflow/python/saved_model/utils_test.py
|
62
|
5133
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import utils
class UtilsTest(test.TestCase):
def testBuildTensorInfoDense(self):
x = array_ops.placeholder(dtypes.float32, 1, name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual("x:0", x_tensor_info.name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(1, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size)
def testBuildTensorInfoSparse(self):
x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual(x.values.name,
x_tensor_info.coo_sparse.values_tensor_name)
self.assertEqual(x.indices.name,
x_tensor_info.coo_sparse.indices_tensor_name)
self.assertEqual(x.dense_shape.name,
x_tensor_info.coo_sparse.dense_shape_tensor_name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)
self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)
def testGetTensorFromInfoDense(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, ops.Tensor)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoSparse(self):
expected = array_ops.sparse_placeholder(dtypes.float32, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, sparse_tensor.SparseTensor)
self.assertEqual(expected.values.name, actual.values.name)
self.assertEqual(expected.indices.name, actual.indices.name)
self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)
def testGetTensorFromInfoInOtherGraph(self):
with ops.Graph().as_default() as expected_graph:
expected = array_ops.placeholder(dtypes.float32, 1, name="right")
tensor_info = utils.build_tensor_info(expected)
with ops.Graph().as_default(): # Some other graph.
array_ops.placeholder(dtypes.float32, 1, name="other")
actual = utils.get_tensor_from_tensor_info(tensor_info,
graph=expected_graph)
self.assertIsInstance(actual, ops.Tensor)
self.assertIs(actual.graph, expected_graph)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoInScope(self):
# Build a TensorInfo with name "bar/x:0".
with ops.Graph().as_default():
with ops.name_scope("bar"):
unscoped = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(unscoped)
self.assertEqual("bar/x:0", tensor_info.name)
# Build a graph with node "foo/bar/x:0", akin to importing into scope foo.
with ops.Graph().as_default():
with ops.name_scope("foo"):
with ops.name_scope("bar"):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
self.assertEqual("foo/bar/x:0", expected.name)
# Test that tensor is found by prepending the import scope.
actual = utils.get_tensor_from_tensor_info(tensor_info,
import_scope="foo")
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoRaisesErrors(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
tensor_info.name = "blah:0" # Nonexistant name.
with self.assertRaises(KeyError):
utils.get_tensor_from_tensor_info(tensor_info)
tensor_info.ClearField("name") # Malformed (missing encoding).
with self.assertRaises(ValueError):
utils.get_tensor_from_tensor_info(tensor_info)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
binoculars/osf.io
|
addons/box/tests/utils.py
|
12
|
6250
|
# -*- coding: utf-8 -*-
import mock
from contextlib import contextmanager
from addons.base.tests.base import OAuthAddonTestCaseMixin, AddonTestCase
from addons.box.models import Provider
from addons.box.tests.factories import BoxAccountFactory
class BoxAddonTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
ADDON_SHORT_NAME = 'box'
ExternalAccountFactory = BoxAccountFactory
Provider = Provider
def set_node_settings(self, settings):
super(BoxAddonTestCase, self).set_node_settings(settings)
settings.folder_id = '1234567890'
settings.folder_name = 'Foo'
mock_responses = {
'folder': {
'name': 'anything',
'item_collection': {
'entries': [
{
'name': 'anything', 'type': 'file', 'id': 'anything'
},
{
'name': 'anything', 'type': 'folder', 'id': 'anything'
},
{
'name': 'anything', 'type': 'anything', 'id': 'anything'
},
]
},
'path_collection': {
'entries': [
{
'name': 'anything', 'type': 'file', 'id': 'anything'
},
{
'name': 'anything', 'type': 'folder', 'id': 'anything'
},
{
'name': 'anything', 'type': 'anything', 'id': 'anything'
},
]
}
},
'put_file': {
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'box',
'size': '77 bytes',
'thumb_exists': False
},
'metadata_list': {
"size": "0 bytes",
"hash": "37eb1ba1849d4b0fb0b28caf7ef3af52",
"bytes": 0,
"thumb_exists": False,
"rev": "714f029684fe",
"modified": "Wed, 27 Apr 2011 22:18:51 +0000",
"path": "/Public",
"is_dir": True,
"icon": "folder_public",
"root": "box",
"contents": [
{
"size": "0 bytes",
"rev": "35c1f029684fe",
"thumb_exists": False,
"bytes": 0,
"modified": "Mon, 18 Jul 2011 20:13:43 +0000",
"client_mtime": "Wed, 20 Apr 2011 16:20:19 +0000",
"path": "/Public/latest.txt",
"is_dir": False,
"icon": "page_white_text",
"root": "box",
"mime_type": "text/plain",
"revision": 220191
},
{
u'bytes': 0,
u'icon': u'folder',
u'is_dir': True,
u'modified': u'Sat, 22 Mar 2014 05:40:29 +0000',
u'path': u'/datasets/New Folder',
u'rev': u'3fed51f002c12fc',
u'revision': 67032351,
u'root': u'box',
u'size': u'0 bytes',
u'thumb_exists': False
}
],
"revision": 29007
},
'metadata_single': {
u'id': 'id',
u'bytes': 74,
u'client_mtime': u'Mon, 13 Jan 2014 20:24:15 +0000',
u'icon': u'page_white',
u'is_dir': False,
u'mime_type': u'text/csv',
u'modified': u'Fri, 21 Mar 2014 05:46:36 +0000',
u'path': '/datasets/foo.txt',
u'rev': u'a2149fb64',
u'revision': 10,
u'root': u'app_folder',
u'size': u'74 bytes',
u'thumb_exists': False
},
'revisions': [{u'bytes': 0,
u'client_mtime': u'Wed, 31 Dec 1969 23:59:59 +0000',
u'icon': u'page_white_picture',
u'is_deleted': True,
u'is_dir': False,
u'mime_type': u'image/png',
u'modified': u'Tue, 25 Mar 2014 03:39:13 +0000',
u'path': u'/svs-v-barks.png',
u'rev': u'3fed741002c12fc',
u'revision': 67032897,
u'root': u'box',
u'size': u'0 bytes',
u'thumb_exists': True},
{u'bytes': 151164,
u'client_mtime': u'Sat, 13 Apr 2013 21:56:36 +0000',
u'icon': u'page_white_picture',
u'is_dir': False,
u'mime_type': u'image/png',
u'modified': u'Tue, 25 Mar 2014 01:45:51 +0000',
u'path': u'/svs-v-barks.png',
u'rev': u'3fed61a002c12fc',
u'revision': 67032602,
u'root': u'box',
u'size': u'147.6 KB',
u'thumb_exists': True}]
}
class MockBox(object):
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
return mock_responses['put_file']
def metadata(self, path, list=True, file_limit=25000, hash=None, rev=None,
include_deleted=False):
if list:
ret = mock_responses['metadata_list']
else:
ret = mock_responses['metadata_single']
ret['path'] = path
return ret
def get_folder(*args, **kwargs):
return mock_responses['folder']
def get_file_and_metadata(*args, **kwargs):
pass
def file_delete(self, path):
return mock_responses['metadata_single']
def revisions(self, path):
ret = mock_responses['revisions']
for each in ret:
each['path'] = path
return ret
def get_user_info(self):
return {'display_name': 'Mr. Box'}
@contextmanager
def patch_client(target, mock_client=None):
"""Patches a function that returns a BoxClient, returning an instance
of MockBox instead.
Usage: ::
with patch_client('addons.box.views.BoxClient') as client:
# test view that uses the box client.
"""
with mock.patch(target) as client_getter:
client = mock_client or MockBox()
client_getter.return_value = client
yield client
|
apache-2.0
|
lucasrangit/twitter-winner
|
twitter-winner/oauthlib/oauth2/rfc6749/endpoints/resource.py
|
6
|
3200
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
class ResourceEndpoint(BaseEndpoint):
"""Authorizes access to protected resources.
The client accesses protected resources by presenting the access
token to the resource server. The resource server MUST validate the
access token and ensure that it has not expired and that its scope
covers the requested resource. The methods used by the resource
server to validate the access token (as well as any error responses)
are beyond the scope of this specification but generally involve an
interaction or coordination between the resource server and the
authorization server::
# For most cases, returning a 403 should suffice.
The method in which the client utilizes the access token to
authenticate with the resource server depends on the type of access
token issued by the authorization server. Typically, it involves
using the HTTP "Authorization" request header field [RFC2617] with an
authentication scheme defined by the specification of the access
token type used, such as [RFC6750]::
# Access tokens may also be provided in query and body
https://example.com/protected?access_token=kjfch2345sdf # Query
access_token=sdf23409df # Body
"""
def __init__(self, default_token, token_types):
BaseEndpoint.__init__(self)
self._tokens = token_types
self._default_token = default_token
@property
def default_token(self):
return self._default_token
@property
def default_token_type_handler(self):
return self.tokens.get(self.default_token)
@property
def tokens(self):
return self._tokens
@catch_errors_and_unavailability
def verify_request(self, uri, http_method='GET', body=None, headers=None,
scopes=None):
"""Validate client, code etc, return body + headers"""
request = Request(uri, http_method, body, headers)
request.token_type = self.find_token_type(request)
request.scopes = scopes
token_type_handler = self.tokens.get(request.token_type,
self.default_token_type_handler)
log.debug('Dispatching token_type %s request to %r.',
request.token_type, token_type_handler)
return token_type_handler.validate_request(request), request
def find_token_type(self, request):
"""Token type identification.
RFC 6749 does not provide a method for easily differentiating between
different token types during protected resource access. We estimate
the most likely token type (if any) by asking each known token type
to give an estimation based on the request.
"""
estimates = sorted(((t.estimate_type(request), n) for n, t in self.tokens.items()), reverse=True)
return estimates[0][1] if len(estimates) else None
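# ---------------------------------------------------------------------------
# Hedged usage sketch: ``verify_request`` looks up a handler by the token type
# that ``find_token_type`` estimates, then delegates to that handler's
# ``validate_request``. The minimal handler below is an assumption standing in
# for a real token implementation (e.g. a Bearer token validator).
if __name__ == '__main__':
    class _StaticBearerToken(object):
        def estimate_type(self, request):
            # Higher score means "more likely to be this token type".
            auth = request.headers.get('Authorization', '')
            return 9 if auth.startswith('Bearer ') else 0

        def validate_request(self, request):
            return request.headers.get('Authorization') == 'Bearer secret-token'

    endpoint = ResourceEndpoint(default_token='Bearer',
                                token_types={'Bearer': _StaticBearerToken()})
    valid, request = endpoint.verify_request(
        'https://example.com/protected',
        headers={'Authorization': 'Bearer secret-token'})
    print(valid)  # True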
|
mit
|
wesm/statsmodels
|
scikits/statsmodels/sandbox/km_class.py
|
5
|
11704
|
#a class for the Kaplan-Meier estimator
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
def __init__(self, data, timesIn, groupIn, censoringIn):
raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
#store the inputs
self.data = data
self.timesIn = timesIn
self.groupIn = groupIn
self.censoringIn = censoringIn
def fit(self):
#split the data into groups based on the predicting variable
#get a set of all the groups
groups = list(set(self.data[:,self.groupIn]))
#create an empty list to store the data for different groups
groupList = []
#create an empty list for each group and add it to groups
for i in range(len(groups)):
groupList.append([])
#iterate through all the groups in groups
for i in range(len(groups)):
#iterate through the rows of dataArray
for j in range(len(self.data)):
#test if this row has the correct group
if self.data[j,self.groupIn] == groups[i]:
#add the row to groupList
groupList[i].append(self.data[j])
#create an empty list to store the times for each group
timeList = []
#iterate through all the groups
for i in range(len(groupList)):
#create an empty list
times = []
#iterate through all the rows of the group
for j in range(len(groupList[i])):
#get a list of all the times in the group
times.append(groupList[i][j][self.timesIn])
#get a sorted set of the times and store it in timeList
times = list(sorted(set(times)))
timeList.append(times)
#get a list of the number at risk and events at each time
#create an empty list to store the results in
timeCounts = []
#create an empty list to hold points for plotting
points = []
#create a list for points where censoring occurs
censoredPoints = []
#iterate through each group
for i in range(len(groupList)):
#initialize a variable to estimate the survival function
survival = 1
#initialize a variable to estimate the variance of
#the survival function
varSum = 0
#initialize a counter for the number at risk
riskCounter = len(groupList[i])
#create a list for the counts for this group
counts = []
##create a list for points to plot
x = []
y = []
#iterate through the list of times
for j in range(len(timeList[i])):
if j != 0:
if j == 1:
#add an indicator to tell if the time
#starts a new group
groupInd = 1
#add (0,1) to the list of points
x.append(0)
y.append(1)
#add the point time to the right of that
x.append(timeList[i][j-1])
y.append(1)
#add the point below that at survival
x.append(timeList[i][j-1])
y.append(survival)
#add the survival to y
y.append(survival)
else:
groupInd = 0
#add survival twice to y
y.append(survival)
y.append(survival)
#add the time twice to x
x.append(timeList[i][j-1])
x.append(timeList[i][j-1])
#add each censored time, number of censorings and
#its survival to censoredPoints
censoredPoints.append([timeList[i][j-1],
censoringNum,survival,groupInd])
#add the count to the list
counts.append([timeList[i][j-1],riskCounter,
eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#increment the number at risk
riskCounter += -1*(riskChange)
#initialize a counter for the change in the number at risk
riskChange = 0
#initialize a counter to zero
eventCounter = 0
#initialize a counter to tell when censoring occurs
censoringCounter = 0
censoringNum = 0
#iterate through the observations in each group
for k in range(len(groupList[i])):
#check if the observation has the given time
if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
#increment the number at risk counter
riskChange += 1
#check if this is an event or censoring
if groupList[i][k][self.censoringIn] == 1:
#add 1 to the counter
eventCounter += 1
else:
censoringNum += 1
#check if there are any events at this time
if eventCounter != censoringCounter:
censoringCounter = eventCounter
#calculate the estimate of the survival function
survival *= ((float(riskCounter) -
eventCounter)/(riskCounter))
try:
#calculate the estimate of the variance
varSum += (eventCounter)/((riskCounter)
*(float(riskCounter)-
eventCounter))
except ZeroDivisionError:
varSum = 0
#append the last row to counts
counts.append([timeList[i][len(timeList[i])-1],
riskCounter,eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#add the last time once to x
x.append(timeList[i][len(timeList[i])-1])
x.append(timeList[i][len(timeList[i])-1])
#add the last survival twice to y
y.append(survival)
#y.append(survival)
censoredPoints.append([timeList[i][len(timeList[i])-1],
censoringNum,survival,1])
#add the list for the group to a list for all the groups
timeCounts.append(np.array(counts))
points.append([x,y])
#returns a list of arrays, where each array has as its columns: the time,
#the number at risk, the number of events, the estimated value of the
#survival function at that time, and the estimated standard error at
#that time, in that order
self.results = timeCounts
self.points = points
self.censoredPoints = censoredPoints
def plot(self):
x = []
#iterate through the groups
for i in range(len(self.points)):
#plot x and y
plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
#create lists of all the x and y values
x += self.points[i][0]
for j in range(len(self.censoredPoints)):
#check if censoring is occurring
if (self.censoredPoints[j][1] != 0):
#if this is the first censored point
if (self.censoredPoints[j][3] == 1) and (j == 0):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this censored point starts a new group
elif ((self.censoredPoints[j][3] == 1) and
(self.censoredPoints[j-1][3] == 1)):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is the last censored point
elif j == (len(self.censoredPoints) - 1):
#calculate a distance beyond the previous time
#so that all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is a point in the middle of the group
else:
#calculate a distance beyond the current time
#to place the point, so they all fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j+1][0])
- self.censoredPoints[j][0]))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#set the size of the plot so it extends to the max x and above 1 for y
plt.xlim((0,np.max(x)))
plt.ylim((0,1.05))
#label the axes
plt.xlabel('time')
plt.ylabel('survival')
plt.show()
def show_results(self):
#start a string that will be a table of the results
resultsString = ''
#iterate through all the groups
for i in range(len(self.results)):
#label the group and header
resultsString += ('Group {0}\n\n'.format(i) +
'Time At Risk Events Survival Std. Err\n')
for j in self.results[i]:
#add the results to the string
resultsString += (
'{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
print(resultsString)
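# ---------------------------------------------------------------------------
# Hedged illustration (standalone; it does not use the class above, whose
# constructor now raises in favour of survival2.py): the same product-limit
# update performed in fit() -- survival *= (r - d) / r, with Greenwood's
# variance sum d / (r * (r - d)) -- applied to a tiny made-up sample of
# (time, events d, number at risk r) triples.
if __name__ == '__main__':
    _sample = [(2, 1, 10), (5, 2, 8), (9, 1, 5)]  # illustrative numbers only
    _survival, _var_sum = 1.0, 0.0
    for _t, _d, _r in _sample:
        _survival *= (_r - _d) / float(_r)
        _var_sum += _d / float(_r * (_r - _d))
        print(_t, round(_survival, 4), round(_survival * sqrt(_var_sum), 4))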
|
bsd-3-clause
|
mhotwagner/abackend
|
abackend-env/lib/python3.5/site-packages/wheel/test/test_signatures.py
|
565
|
1120
|
from wheel import signatures
from wheel.signatures import djbec, ed25519py
from wheel.util import binary
def test_getlib():
signatures.get_ed25519ll()
def test_djbec():
djbec.dsa_test()
djbec.dh_test()
def test_ed25519py():
kp0 = ed25519py.crypto_sign_keypair(binary(' '*32))
kp = ed25519py.crypto_sign_keypair()
signed = ed25519py.crypto_sign(binary('test'), kp.sk)
ed25519py.crypto_sign_open(signed, kp.vk)
try:
ed25519py.crypto_sign_open(signed, kp0.vk)
except ValueError:
pass
else:
raise Exception("Expected ValueError")
try:
ed25519py.crypto_sign_keypair(binary(' '*33))
except ValueError:
pass
else:
raise Exception("Expected ValueError")
try:
ed25519py.crypto_sign(binary(''), binary(' ')*31)
except ValueError:
pass
else:
raise Exception("Expected ValueError")
try:
ed25519py.crypto_sign_open(binary(''), binary(' ')*31)
except ValueError:
pass
else:
raise Exception("Expected ValueError")
|
mit
|
ArtsiomCh/tensorflow
|
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver_test.py
|
21
|
3916
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.tpu_cluster_resolver import TPUClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
mock = test.mock
class TPUClusterResolverTest(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
"""Verifies that the ClusterSpec generates the correct proto.
We are testing this in four different ways to ensure that the ClusterSpec
returned by the TPUClusterResolver behaves identically to a normal
ClusterSpec when passed into the generic ClusterSpec libraries.
Args:
cluster_spec: ClusterSpec returned by the TPUClusterResolver
expected_proto: Expected protobuf
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def mock_service_client(
self,
tpu_map=None):
if tpu_map is None:
tpu_map = {}
def get_side_effect(name):
return tpu_map[name]
mock_client = mock.MagicMock()
mock_client.projects.locations.nodes.get.side_effect = get_side_effect
return mock_client
def testSimpleSuccessfulRetrieval(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470'
}
}
tpu_cluster_resolver = TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu_names=['test-tpu-1'],
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = tpu_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'tpu_worker' tasks { key: 0 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testMultipleSuccessfulRetrieval(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470'
},
'projects/test-project/locations/us-central1-c/nodes/test-tpu-2': {
'ipAddress': '10.4.5.6',
'port': '8470'
}
}
tpu_cluster_resolver = TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu_names=['test-tpu-2', 'test-tpu-1'],
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = tpu_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'tpu_worker' tasks { key: 0 value: '10.4.5.6:8470' }
tasks { key: 1 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
|
apache-2.0
|
readbeyond/aeneas
|
aeneas/tests/long_test_task_misc.py
|
5
|
10851
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from aeneas.tools.execute_task import ExecuteTaskCLI
import aeneas.globalfunctions as gf
# TODO actually parse this file to know what extras
# (festival, speect, etc.) are available to test
EXTRA_TESTS = os.path.exists(os.path.join(os.path.expanduser("~"), ".aeneas.conf"))
class TestExecuteTaskCLI(unittest.TestCase):
def execute(self, parameters, expected_exit_code):
output_path = gf.tmp_directory()
params = ["placeholder"]
for p_type, p_value in parameters:
if p_type == "in":
params.append(gf.absolute_path(p_value, __file__))
elif p_type == "out":
params.append(os.path.join(output_path, p_value))
else:
params.append(p_value)
exit_code = ExecuteTaskCLI(use_sys=False).run(arguments=params)
gf.delete_directory(output_path)
self.assertEqual(exit_code, expected_exit_code)
def test_exec_tts_no_cache_empty_fragments(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tests/res/inputtext/plain_with_empty_lines.txt"),
("", "task_language=eng|is_text_type=plain|os_task_file_format=json"),
("out", "sonnet.json"),
("", "-r=\"tts_cache=False\"")
], 0)
def test_exec_tts_cache_empty_fragments(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tests/res/inputtext/plain_with_empty_lines.txt"),
("", "task_language=eng|is_text_type=plain|os_task_file_format=json"),
("out", "sonnet.json"),
("", "-r=\"tts_cache=True\"")
], 0)
def test_exec_tts_cache_empty_fragments_pure(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tests/res/inputtext/plain_with_empty_lines.txt"),
("", "task_language=eng|is_text_type=plain|os_task_file_format=json"),
("out", "sonnet.json"),
("", "-r=\"tts_cache=True|cew=False\"")
], 0)
def test_exec_tts_cache_empty_fragments_festival(self):
if not EXTRA_TESTS:
return
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tests/res/inputtext/plain_with_empty_lines.txt"),
("", "task_language=eng|is_text_type=plain|os_task_file_format=json"),
("out", "sonnet.json"),
("", "-r=\"tts=festival|tts_cache=True|cfw=True\"")
], 0)
def test_exec_tts_cache_empty_fragments_festival_pure(self):
if not EXTRA_TESTS:
return
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tests/res/inputtext/plain_with_empty_lines.txt"),
("", "task_language=eng|is_text_type=plain|os_task_file_format=json"),
("out", "sonnet.json"),
("", "-r=\"tts=festival|tts_cache=True|cfw=False\"")
], 0)
def test_exec_rateaggressive_remove_nonspeech(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt")
], 0)
def test_exec_rateaggressive_remove_nonspeech_add(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE|os_task_file_head_tail_format=add"),
("out", "sonnet.srt")
], 0)
def test_exec_rateaggressive_remove_nonspeech_smaller_rate(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=12.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt")
], 0)
def test_exec_rateaggressive_remove_nonspeech_idiotic_rate(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=2.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt")
], 0)
def test_exec_rateaggressive_remove_nonspeech_nozero(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE|task_adjust_boundary_no_zero=True"),
("out", "sonnet.srt")
], 0)
def test_exec_rateaggressive_nozero(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_no_zero=True"),
("out", "sonnet.srt")
], 0)
def test_exec_rateaggressive_nozero_add(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_no_zero=True|os_task_file_head_tail_format=add"),
("out", "sonnet.srt")
], 0)
def test_exec_mplain_rateaggressive_remove_nonspeech(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.json")
], 0)
def test_exec_mplain_rateaggressive_remove_nonspeech_add(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE|os_task_file_head_tail_format=add"),
("out", "sonnet.json")
], 0)
def test_exec_mplain_rateaggressive_remove_nonspeech_smaller_rate(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=12.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.json")
], 0)
def test_exec_mplain_rateaggressive_remove_nonspeech_idiotic_rate(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=2.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.json")
], 0)
def test_exec_mplain_rateaggressive_remove_nonspeech_nozero(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE|task_adjust_boundary_no_zero=True"),
("out", "sonnet.json")
], 0)
def test_exec_mplain_rateaggressive_nozero(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_no_zero=True"),
("out", "sonnet.json")
], 0)
def test_exec_mplain_rateaggressive_nozero_add(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/mplain.txt"),
("", "task_language=eng|is_text_type=mplain|os_task_file_format=json|task_adjust_boundary_algorithm=rateaggressive|task_adjust_boundary_rate_value=14.000|task_adjust_boundary_no_zero=True|os_task_file_head_tail_format=add"),
("out", "sonnet.json")
], 0)
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
y-tsutsu/mondja
|
mondja/middleware.py
|
1
|
1342
|
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
from mondja.pydenticon_wrapper import create_identicon
class MondjaMiddleware(MiddlewareMixin):
def __init__(self, get_response=None):
self.get_response = get_response
def process_request(self, request):
# Generate identicons for Heroku
users = User.objects.all()
for item in users:
if item.username != '':
create_identicon(item.username)
# If the login page is requested while the user is already logged in, redirect to Home
if request.path == reverse('login') and request.method == 'GET' and request.user.is_authenticated:
# As an exception, do not redirect to Home when redirected here by user_passes_test(is_staff)
if request.GET.get('need_staff') and not request.user.is_staff:
pass
# As an exception, do not redirect to Home when redirected here by user_passes_test(is_superuser)
elif request.GET.get('need_superuser') and not request.user.is_superuser:
pass
# In all other cases, redirect to Home
else:
return redirect('/')
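# ---------------------------------------------------------------------------
# Hedged configuration sketch (the surrounding entries are illustrative): the
# middleware above relies on request.user, so it must be listed after Django's
# AuthenticationMiddleware in the project's MIDDLEWARE setting, e.g.:
#
#   MIDDLEWARE = [
#       'django.middleware.security.SecurityMiddleware',
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'mondja.middleware.MondjaMiddleware',
#   ]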
|
bsd-3-clause
|
SolaWing/ycmd
|
ycmd/tests/bindings/cpp_bindings_raises_exception_test.py
|
5
|
6258
|
# Copyright (C) 2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycmd.tests.test_utils import ClangOnly
from hamcrest import assert_that, calling, raises
import ycm_core
READONLY_MESSAGE = 'can\'t set attribute'
@ClangOnly
def CppBindings_ReadOnly_test():
assert_that( calling( ycm_core.CompletionData().__setattr__ )
.with_args( 'kind_', ycm_core.CompletionData().kind_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Location().__setattr__ )
.with_args( 'line_number_', 1 ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Location().__setattr__ )
.with_args( 'column_number_', 1 ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Location().__setattr__ )
.with_args( 'filename_', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Range().__setattr__ )
.with_args( 'end_', ycm_core.Range().end_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Range().__setattr__ )
.with_args( 'start_', ycm_core.Range().start_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.FixItChunk().__setattr__ )
.with_args( 'range', ycm_core.FixItChunk().range ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.FixItChunk().__setattr__ )
.with_args( 'replacement_text', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.FixIt().__setattr__ )
.with_args( 'chunks', ycm_core.FixIt().chunks ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.FixIt().__setattr__ )
.with_args( 'location', ycm_core.FixIt().location ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.FixIt().__setattr__ )
.with_args( 'text', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'ranges_', ycm_core.Diagnostic().ranges_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'location_', ycm_core.Diagnostic().location_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'location_extent_',
ycm_core.Diagnostic().location_extent_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'fixits_', ycm_core.Diagnostic().fixits_ ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'text_', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'long_formatted_text_', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.Diagnostic().__setattr__ )
.with_args( 'kind_', ycm_core.Diagnostic().kind_.WARNING ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.DocumentationData().__setattr__ )
.with_args( 'raw_comment', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.DocumentationData().__setattr__ )
.with_args( 'brief_comment', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.DocumentationData().__setattr__ )
.with_args( 'canonical_type', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.DocumentationData().__setattr__ )
.with_args( 'display_name', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( ycm_core.DocumentationData().__setattr__ )
.with_args( 'comment_xml', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
db = ycm_core.CompilationDatabase( 'foo' )
assert_that( calling( db.__setattr__ )
.with_args( 'database_directory', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
compilation_info = db.GetCompilationInfoForFile( 'foo.c' )
assert_that( calling( compilation_info.__setattr__ )
.with_args( 'compiler_working_dir_', 'foo' ),
raises( AttributeError, READONLY_MESSAGE ) )
assert_that( calling( compilation_info.__setattr__ )
.with_args( 'compiler_flags_', ycm_core.StringVector() ),
raises( AttributeError, READONLY_MESSAGE ) )
@ClangOnly
def CppBindings_CompilationInfo_NoInit_test():
assert_that( calling( ycm_core.CompilationInfoForFile ),
raises( TypeError, 'ycm_core.CompilationInfoForFile:'
' No constructor defined!' ) )
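# Illustrative aside (not part of ycmd): the same hamcrest idiom used above,
# applied to a plain read-only Python property so the pattern is visible without
# the ycm_core bindings. Point is a hypothetical stand-in; the exact AttributeError
# message varies between Python versions, so no message pattern is asserted here.
from hamcrest import assert_that, calling, raises


class Point( object ):
  def __init__( self, x ):
    self._x = x

  @property
  def x( self ):
    return self._x


assert_that( calling( setattr ).with_args( Point( 1 ), 'x', 2 ),
             raises( AttributeError ) )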
|
gpl-3.0
|
cmu-db/db-webcrawler
|
library/admin.py
|
2
|
2022
|
from django.contrib import admin
from models import *
class DependencyInline(admin.StackedInline):
model = Dependency
extra = 3
class ProjectTypeAdmin(admin.ModelAdmin):
list_display = [ 'name', 'filename', 'deployer_class' ]
## CLASS
class RepositorySourceAdmin(admin.ModelAdmin):
list_display = [ 'name', 'crawler_class', 'base_url', 'commit_url', 'search_token', ]
## CLASS
class CrawlerStatusAdmin(admin.ModelAdmin):
list_display = [ 'id', 'source', 'project_type', 'next_url', 'last_crawler_time', ]
## CLASS
class RepositoryAdmin(admin.ModelAdmin):
list_display = [ 'id', 'name', 'valid_project', 'get_project_type', 'source', 'commits_count', 'description', 'crawler_date', 'updated_date' ]
list_filter = ['project_type', 'valid_project', 'crawler_date', 'updated_date']
fieldsets = [
(None, {'fields': ['name', 'project_type', 'source', 'description']}),
('Date information', {'fields': ['created_at', 'updated_at', 'pushed_at']}),
]
def get_project_type(self, obj):
return obj.project_type.name
get_project_type.short_description = 'Project Type'
# CLASS
class AttemptAdmin(admin.ModelAdmin):
list_display = [ 'id', 'repo', 'result_name', 'start_time', 'stop_time' ]
list_filter = ['result', 'start_time']
raw_id_fields = [ 'repo' ]
#inlines = [DependencyInline]
# CLASS
class PackageAdmin(admin.ModelAdmin):
list_display = [ 'name', 'project_type', 'version', 'count' ]
list_filter = ['project_type']
# CLASS
# Register your models here.
admin.site.register(ProjectType, ProjectTypeAdmin)
admin.site.register(RepositorySource, RepositorySourceAdmin)
admin.site.register(CrawlerStatus, CrawlerStatusAdmin)
admin.site.register(Database)
admin.site.register(Repository, RepositoryAdmin)
admin.site.register(Package, PackageAdmin)
admin.site.register(Dependency)
admin.site.register(Attempt, AttemptAdmin)
admin.site.register(Module)
admin.site.register(WebStatistic)
admin.site.register(Statistic)
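# Illustrative aside (not part of the original admin.py): the admin.register
# decorator is an equivalent alternative to the admin.site.register calls above.
# Sketch only; WebStatisticAdmin is a hypothetical class, and WebStatistic is
# already registered above with the default ModelAdmin.
from django.contrib import admin
from models import WebStatistic


@admin.register(WebStatistic)
class WebStatisticAdmin(admin.ModelAdmin):
    list_display = ['id']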
|
apache-2.0
|
Bismarrck/tensorflow
|
tensorflow/python/keras/engine/topology_test.py
|
1
|
44915
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer as input_layer_lib
from tensorflow.python.keras.engine import network as network_lib
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
class TopologyConstructionTest(keras_parameterized.TestCase):
@test_util.run_deprecated_v1
def test_get_updates(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_update(state_ops.assign_add(self.a, [[1.]],
name='unconditional_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.b, inputs,
name='conditional_update'),
inputs=True)
return inputs + 1
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer.apply(x1)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(x1)), 1)
self.assertEqual(len(layer.get_updates_for(None)), 1)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer.apply(x2)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(x1)), 1)
self.assertEqual(len(layer.get_updates_for(x2)), 1)
self.assertEqual(len(layer.get_updates_for(None)), 1)
network = network_lib.Network(x2, y2)
self.assertEqual(len(network.updates), 2)
self.assertEqual(len(network.get_updates_for(x1)), 0)
self.assertEqual(len(network.get_updates_for(x2)), 1)
self.assertEqual(len(network.get_updates_for(None)), 1)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer.apply(x3)
self.assertEqual(len(network.updates), 2)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.updates), 3)
self.assertEqual(len(network.get_updates_for(x2)), 1)
self.assertEqual(len(network.get_updates_for(x4)), 1)
self.assertEqual(len(network.get_updates_for(None)), 1)
network.add_update(state_ops.assign_add(layer.a, [[1]]))
self.assertEqual(len(network.updates), 4)
self.assertEqual(len(network.get_updates_for(None)), 2)
network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
self.assertEqual(len(network.updates), 5)
self.assertEqual(len(network.get_updates_for(x4)), 2)
@test_util.run_in_graph_and_eager_modes()
def test_get_updates_bn(self):
x1 = input_layer_lib.Input(shape=(1,))
layer = keras.layers.BatchNormalization()
_ = layer.apply(x1)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(x1)), 2)
self.assertEqual(len(layer.get_updates_for(None)), 0)
@test_util.run_deprecated_v1
def test_get_losses(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_loss(math_ops.reduce_sum(self.a))
self.built = True
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs),
inputs=True)
return inputs + 1
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer.apply(x1)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(x1)), 1)
self.assertEqual(len(layer.get_losses_for(None)), 1)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer.apply(x2)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(x1)), 1)
self.assertEqual(len(layer.get_losses_for(x2)), 1)
self.assertEqual(len(layer.get_losses_for(None)), 1)
network = network_lib.Network(x2, y2)
self.assertEqual(len(network.losses), 2)
self.assertEqual(len(network.get_losses_for(x1)), 0)
self.assertEqual(len(network.get_losses_for(x2)), 1)
self.assertEqual(len(network.get_losses_for(None)), 1)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer.apply(x3)
self.assertEqual(len(network.losses), 2)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.losses), 3)
self.assertEqual(len(network.get_losses_for(x2)), 1)
self.assertEqual(len(network.get_losses_for(x4)), 1)
self.assertEqual(len(network.get_losses_for(None)), 1)
network.add_loss(math_ops.reduce_sum(layer.a))
self.assertEqual(len(network.losses), 4)
self.assertEqual(len(network.get_losses_for(None)), 2)
network.add_loss(math_ops.reduce_sum(x4), inputs=True)
self.assertEqual(len(network.losses), 5)
self.assertEqual(len(network.get_losses_for(x4)), 2)
@test_util.run_in_graph_and_eager_modes()
def testTopologicalAttributes(self):
# test layer attributes / methods related to cross-layer connectivity.
a = input_layer_lib.Input(shape=(32,), name='input_a')
b = input_layer_lib.Input(shape=(32,), name='input_b')
# test input, output, input_shape, output_shape
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
# test `get_*_at` methods
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
# Test invalid value for attribute retrieval.
with self.assertRaises(ValueError):
dense.get_input_at(2)
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.input
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.output
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.output_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.output_shape
@test_util.run_in_graph_and_eager_modes()
def testTopologicalAttributesMultiOutputLayer(self):
class PowersLayer(keras.layers.Layer):
def call(self, inputs):
return [inputs**2, inputs**3]
x = input_layer_lib.Input(shape=(32,))
test_layer = PowersLayer()
p1, p2 = test_layer(x) # pylint: disable=not-callable
self.assertEqual(test_layer.input, x)
self.assertEqual(test_layer.output, [p1, p2])
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])
@test_util.run_in_graph_and_eager_modes()
def testTopologicalAttributesMultiInputLayer(self):
class AddLayer(keras.layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
a = input_layer_lib.Input(shape=(32,))
b = input_layer_lib.Input(shape=(32,))
test_layer = AddLayer()
y = test_layer([a, b]) # pylint: disable=not-callable
self.assertEqual(test_layer.input, [a, b])
self.assertEqual(test_layer.output, y)
self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
self.assertEqual(test_layer.output_shape, (None, 32))
@test_util.run_deprecated_v1
def testBasicNetwork(self):
# minimum viable network
x = input_layer_lib.Input(shape=(32,))
dense = keras.layers.Dense(2)
y = dense(x)
network = network_lib.Network(x, y, name='dense_network')
# test basic attributes
self.assertEqual(network.name, 'dense_network')
self.assertEqual(len(network.layers), 2) # InputLayer + Dense
self.assertEqual(network.layers[1], dense)
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, dense.trainable_weights)
self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test network `trainable` attribute
network.trainable = False
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, [])
self.assertEqual(network.non_trainable_weights,
dense.trainable_weights + dense.non_trainable_weights)
@test_util.run_in_graph_and_eager_modes()
def test_trainable_weights(self):
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dense(1)(a)
model = keras.models.Model(a, b)
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[1].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
# sequential model
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[0].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
@test_util.run_deprecated_v1
def test_layer_call_arguments(self):
# Test the ability to pass and serialize arguments to `call`.
inp = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(inp)
x = keras.layers.Dropout(0.5)(x, training=True)
model = keras.models.Model(inp, x)
# Would be `dropout/cond/Merge` by default
self.assertTrue(model.output.op.name.endswith('dropout/mul'))
# Test that argument is kept when applying the model
inp2 = keras.layers.Input(shape=(2,))
out2 = model(inp2)
self.assertTrue(out2.op.name.endswith('dropout/mul'))
# Test that argument is kept after loading a model
config = model.get_config()
model = keras.models.Model.from_config(config)
self.assertTrue(model.output.op.name.endswith('dropout/mul'))
def test_node_construction(self):
# test basics
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), unknown_kwarg=None)
self.assertListEqual(a.get_shape().as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer._inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer._inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertListEqual(node.inbound_layers, [])
self.assertListEqual(node.input_tensors, [a])
self.assertListEqual(node.input_shapes, [(None, 32)])
self.assertListEqual(node.output_tensors, [a])
self.assertListEqual(node.output_shapes, [(None, 32)])
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(len(dense._inbound_nodes), 2)
self.assertEqual(len(dense._outbound_nodes), 0)
self.assertListEqual(dense._inbound_nodes[0].inbound_layers, [a_layer])
self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
self.assertListEqual(dense._inbound_nodes[1].inbound_layers, [b_layer])
self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
self.assertListEqual(dense._inbound_nodes[0].input_tensors, [a])
self.assertListEqual(dense._inbound_nodes[1].input_tensors, [b])
# test layer properties
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertListEqual(test_layer.kernel.get_shape().as_list(), [32, 16])
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
self.assertEqual(dense.get_input_mask_at(0), None)
self.assertEqual(dense.get_input_mask_at(1), None)
self.assertEqual(dense.get_output_mask_at(0), None)
self.assertEqual(dense.get_output_mask_at(1), None)
@test_util.run_in_graph_and_eager_modes()
def test_multi_input_layer(self):
with self.cached_session():
# test multi-input layer
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
self.assertListEqual(merged.get_shape().as_list(), [None, 16 * 2])
merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
self.assertEqual(merge_node_index, 0)
self.assertEqual(merge_tensor_index, 0)
self.assertEqual(len(merge_layer._inbound_nodes), 1)
self.assertEqual(len(merge_layer._outbound_nodes), 0)
self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)
self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
self.assertEqual(len(model.layers), 6)
output_shapes = model.compute_output_shape([(None, 32), (None, 32)])
self.assertListEqual(output_shapes[0].as_list(), [None, 64])
self.assertListEqual(output_shapes[1].as_list(), [None, 5])
self.assertListEqual(
model.compute_mask([a, b], [None, None]), [None, None])
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([l.name for l in model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in model._output_layers],
['dense_2', 'dense_3'])
# actually run model
fn = keras.backend.function(model.inputs, model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
# test get_source_inputs
self.assertListEqual(keras.engine.get_source_inputs(c), [a, b])
# serialization / deserialization
json_config = model.to_json()
recreated_model = keras.models.model_from_json(json_config)
recreated_model.compile('rmsprop', 'mse')
self.assertListEqual([l.name for l in recreated_model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in recreated_model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in recreated_model._output_layers],
['dense_2', 'dense_3'])
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
@test_util.run_deprecated_v1
def test_recursion(self):
with self.cached_session():
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
e = keras.layers.Input(shape=(32,), name='input_e')
f = keras.layers.Input(shape=(32,), name='input_f')
self.assertEqual(len(model.inputs), 2)
g, h = model([e, f])
self.assertEqual(len(model.inputs), 2)
self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')
self.assertListEqual(g.get_shape().as_list(), c.get_shape().as_list())
self.assertListEqual(h.get_shape().as_list(), d.get_shape().as_list())
# test separate manipulation of different layer outputs
i = keras.layers.Dense(7, name='dense_4')(h)
final_model = keras.models.Model(
inputs=[e, f], outputs=[i, g], name='final')
self.assertEqual(len(final_model.inputs), 2)
self.assertEqual(len(final_model.outputs), 2)
self.assertEqual(len(final_model.layers), 4)
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([layer.name for layer in final_model.layers][2:],
['model', 'dense_4'])
self.assertListEqual(
model.compute_mask([e, f], [None, None]), [None, None])
self.assertListEqual(
final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
(10, 64)])
# run recursive model
fn = keras.backend.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
# test serialization
model_config = final_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
@test_util.run_in_graph_and_eager_modes()
def test_multi_input_multi_output_recursion(self):
with self.cached_session():
# test multi-input multi-output
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
_, n = model([j, k])
o = keras.layers.Input(shape=(32,), name='input_o')
p = keras.layers.Input(shape=(32,), name='input_p')
q, _ = model([o, p])
self.assertListEqual(n.get_shape().as_list(), [None, 5])
self.assertListEqual(q.get_shape().as_list(), [None, 64])
s = keras.layers.concatenate([n, q], name='merge_nq')
self.assertListEqual(s.get_shape().as_list(), [None, 64 + 5])
# test with single output as 1-elem list
multi_io_model = keras.models.Model([j, k, o, p], [s])
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test with single output as tensor
multi_io_model = keras.models.Model([j, k, o, p], s)
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test serialization
model_config = multi_io_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
config = model.get_config()
keras.models.Model.from_config(config)
model.summary()
json_str = model.to_json()
keras.models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
keras.models.model_from_yaml(yaml_str)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_graphs(self):
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
# input is not an Input tensor
j = keras.layers.Input(shape=(32,), name='input_j')
j = keras.layers.Dense(32)(j)
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k], [m, n])
# disconnected graph
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j], [m, n])
# redundant outputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
keras.models.Model([j, k], [m, n, n])
# redundant inputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k, j], [m, n])
# I have no idea what I'm doing: garbage as inputs/outputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k], [m, n, 0])
@test_util.run_deprecated_v1
def test_raw_tf_compatibility(self):
# test calling layers/models on TF tensors
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
self.assertEqual(len(model.inputs), 2)
m, n = model([j, k])
self.assertEqual(len(model.inputs), 2)
tf_model = keras.models.Model([j, k], [m, n])
j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
m_tf, n_tf = tf_model([j_tf, k_tf])
self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])
self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])
# test merge
keras.layers.concatenate([j_tf, k_tf], axis=1)
keras.layers.add([j_tf, k_tf])
# test tensor input
x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
keras.layers.InputLayer(input_tensor=x)
x = keras.layers.Input(tensor=x)
keras.layers.Dense(2)(x)
@test_util.run_in_graph_and_eager_modes()
def test_basic_masking(self):
a = keras.layers.Input(shape=(10, 32), name='input_a')
b = keras.layers.Masking()(a)
model = keras.models.Model(a, b)
self.assertEqual(model.output_mask.get_shape().as_list(), [None, 10])
@test_util.run_deprecated_v1
def testMaskingSingleInput(self):
class MaskedLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
if mask is not None:
return inputs * mask
return inputs
def compute_mask(self, inputs, mask=None):
return array_ops.ones_like(inputs)
if context.executing_eagerly():
a = constant_op.constant([2] * 32)
mask = constant_op.constant([0, 1] * 16)
a._keras_mask = mask
b = MaskedLayer().apply(a)
self.assertTrue(hasattr(b, '_keras_mask'))
self.assertAllEqual(
self.evaluate(array_ops.ones_like(mask)),
self.evaluate(getattr(b, '_keras_mask')))
self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
else:
x = input_layer_lib.Input(shape=(32,))
y = MaskedLayer()(x) # pylint: disable=not-callable
network = network_lib.Network(x, y)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
@test_util.run_deprecated_v1
def test_activity_regularization_with_model_composition(self):
def reg(x):
return math_ops.reduce_sum(x)
net_a_input = input_layer_lib.Input((2,))
net_a = net_a_input
net_a = keras.layers.Dense(2, kernel_initializer='ones',
use_bias=False,
activity_regularizer=reg)(net_a)
model_a = keras.Model([net_a_input], [net_a])
net_b_input = input_layer_lib.Input((2,))
net_b = model_a(net_b_input)
model_b = keras.Model([net_b_input], [net_b])
model_b.compile(optimizer='sgd', loss=None)
x = np.ones((1, 2))
loss = model_b.evaluate(x)
self.assertEqual(loss, 4.)
@keras_parameterized.run_all_keras_modes
def test_layer_sharing_at_heterogenous_depth(self):
x_val = np.random.random((10, 5))
x = input_layer_lib.Input(shape=(5,))
a = keras.layers.Dense(5, name='A')
b = keras.layers.Dense(5, name='B')
output = a(b(a(b(x))))
m = keras.models.Model(x, output)
m.run_eagerly = testing_utils.should_run_eagerly()
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
@keras_parameterized.run_all_keras_modes
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
input_shape = (16, 9, 3)
input_layer = input_layer_lib.Input(shape=input_shape)
a = keras.layers.Dense(3, name='dense_A')
b = keras.layers.Dense(3, name='dense_B')
c = keras.layers.Dense(3, name='dense_C')
x1 = b(a(input_layer))
x2 = a(c(input_layer))
output = keras.layers.concatenate([x1, x2])
m = keras.models.Model(inputs=input_layer, outputs=output)
m.run_eagerly = testing_utils.should_run_eagerly()
x_val = np.random.random((10, 16, 9, 3))
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
@keras_parameterized.run_all_keras_modes
def test_explicit_training_argument(self):
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dropout(0.5)(a)
base_model = keras.models.Model(a, b)
a = keras.layers.Input(shape=(2,))
b = base_model(a, training=False)
model = keras.models.Model(a, b)
x = np.ones((100, 2))
y = np.ones((100, 2))
model.compile(
optimizer=gradient_descent.SGD(),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0) # In inference mode, output is equal to input.
a = keras.layers.Input(shape=(2,))
b = base_model(a, training=True)
model = keras.models.Model(a, b)
preds = model.predict(x)
self.assertEqual(np.min(preds), 0.) # At least one unit was dropped.
@keras_parameterized.run_all_keras_modes
def test_multi_output_model_with_none_masking(self):
def func(x):
return [x * 0.2, x * 0.3]
def output_shape(input_shape):
return [input_shape, input_shape]
i = keras.layers.Input(shape=(3, 2, 1))
o = keras.layers.Lambda(function=func, output_shape=output_shape)(i)
self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1))
self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1))
o = keras.layers.add(o)
model = keras.Model(i, o)
model.run_eagerly = testing_utils.should_run_eagerly()
i2 = keras.layers.Input(shape=(3, 2, 1))
o2 = model(i2)
model2 = keras.Model(i2, o2)
model2.run_eagerly = testing_utils.should_run_eagerly()
x = np.random.random((4, 3, 2, 1))
out = model2.predict(x)
assert out.shape == (4, 3, 2, 1)
self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)
@keras_parameterized.run_all_keras_modes
def test_constant_initializer_with_numpy(self):
initializer = keras.initializers.Constant(np.ones((3, 2)))
model = keras.models.Sequential()
model.add(
keras.layers.Dense(2, input_shape=(3,), kernel_initializer=initializer))
model.add(keras.layers.Dense(3))
model.compile(
loss='mse',
optimizer=gradient_descent.SGD(),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
json_str = model.to_json()
keras.models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
keras.models.model_from_yaml(yaml_str)
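# Illustrative aside (not part of the original test file): a minimal sketch of the
# serialization round-trips exercised repeatedly above, written against the public
# tf.keras API. It assumes a TensorFlow installation where tf.keras is available;
# config/JSON round-trips copy structure only, so weights are copied separately.
import numpy as np
import tensorflow as tf

inp = tf.keras.Input(shape=(4,), name='in')
out = tf.keras.layers.Dense(2, name='out')(inp)
model = tf.keras.Model(inp, out)

# Structure-only round-trip: the config holds layers and connectivity, not weights.
clone = tf.keras.Model.from_config(model.get_config())
clone.set_weights(model.get_weights())

x = np.random.random((3, 4)).astype('float32')
np.testing.assert_allclose(model.predict(x), clone.predict(x), atol=1e-6)

# JSON round-trip, as used by to_json() / model_from_json() in the tests above.
rebuilt = tf.keras.models.model_from_json(model.to_json())
rebuilt.set_weights(model.get_weights())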
class DeferredModeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testSimpleNetworkBuilding(self):
inputs = input_layer_lib.Input(shape=(32,))
if context.executing_eagerly():
self.assertEqual(inputs.dtype.name, 'float32')
self.assertEqual(inputs.shape.as_list(), [None, 32])
x = keras.layers.Dense(2)(inputs)
if context.executing_eagerly():
self.assertEqual(x.dtype.name, 'float32')
self.assertEqual(x.shape.as_list(), [None, 2])
outputs = keras.layers.Dense(4)(x)
network = network_lib.Network(inputs, outputs)
self.assertIsInstance(network, network_lib.Network)
if context.executing_eagerly():
# It should be possible to call such a network on EagerTensors.
inputs = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
outputs = network(inputs)
self.assertEqual(outputs.shape.as_list(), [10, 4])
@test_util.run_in_graph_and_eager_modes()
def testMultiIONetworkBuilding(self):
input_a = input_layer_lib.Input(shape=(32,))
input_b = input_layer_lib.Input(shape=(16,))
a = keras.layers.Dense(16)(input_a)
class AddLayer(keras.layers.Layer):
def call(self, inputs):
return inputs[0] + inputs[1]
c = AddLayer()([a, input_b]) # pylint: disable=not-callable
c = keras.layers.Dense(2)(c)
network = network_lib.Network([input_a, input_b], [a, c])
if context.executing_eagerly():
a_val = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
b_val = constant_op.constant(
np.random.random((10, 16)).astype('float32'))
outputs = network([a_val, b_val])
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [10, 16])
self.assertEqual(outputs[1].shape.as_list(), [10, 2])
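# Illustrative aside (not part of the original test file): the "deferred" pattern
# above, sketched with the public API: a functional model is built from symbolic
# Inputs and only later called on concrete eager tensors. Assumes TF 2.x with
# eager execution enabled.
import numpy as np
import tensorflow as tf

a = tf.keras.Input(shape=(32,))
b = tf.keras.Input(shape=(16,))
hidden = tf.keras.layers.Dense(16)(a)
merged = tf.keras.layers.Concatenate()([hidden, b])
net = tf.keras.Model([a, b], tf.keras.layers.Dense(2)(merged))

out = net([tf.constant(np.random.random((10, 32)).astype('float32')),
           tf.constant(np.random.random((10, 16)).astype('float32'))])
print(out.shape)  # (10, 2)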
class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase):
def _testShapeInference(self, model, input_shape, expected_output_shape):
input_value = np.random.random(input_shape)
output_value = model.predict(input_value)
self.assertEqual(output_value.shape, expected_output_shape)
@test_util.run_in_graph_and_eager_modes()
def testSingleInputCase(self):
class LayerWithOneInput(keras.layers.Layer):
def build(self, input_shape):
self.w = array_ops.ones(shape=(3, 4))
def call(self, inputs):
return keras.backend.dot(inputs, self.w)
inputs = input_layer_lib.Input(shape=(3,))
layer = LayerWithOneInput()
if context.executing_eagerly():
self.assertEqual(
layer.compute_output_shape((None, 3)).as_list(), [None, 4])
# As a side-effect, compute_output_shape builds the layer.
self.assertTrue(layer.built)
# We can still query the layer's compute_output_shape with compatible
# input shapes.
self.assertEqual(
layer.compute_output_shape((6, 3)).as_list(), [6, 4])
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
self._testShapeInference(model, (2, 3), (2, 4))
@test_util.run_in_graph_and_eager_modes()
def testMultiInputOutputCase(self):
class MultiInputOutputLayer(keras.layers.Layer):
def build(self, input_shape):
self.w = array_ops.ones(shape=(3, 4))
def call(self, inputs):
a = keras.backend.dot(inputs[0], self.w)
b = a + inputs[1]
return [a, b]
input_a = input_layer_lib.Input(shape=(3,))
input_b = input_layer_lib.Input(shape=(4,))
output_a, output_b = MultiInputOutputLayer()([input_a, input_b])
model = keras.Model([input_a, input_b], [output_a, output_b])
output_a_val, output_b_val = model.predict(
[np.random.random((2, 3)), np.random.random((2, 4))])
self.assertEqual(output_a_val.shape, (2, 4))
self.assertEqual(output_b_val.shape, (2, 4))
@test_util.run_in_graph_and_eager_modes()
def testTrainingArgument(self):
class LayerWithTrainingArg(keras.layers.Layer):
def build(self, input_shape):
self.w = array_ops.ones(shape=(3, 4))
def call(self, inputs, training):
return keras.backend.dot(inputs, self.w)
inputs = input_layer_lib.Input(shape=(3,))
outputs = LayerWithTrainingArg()(inputs, training=False)
model = keras.Model(inputs, outputs)
self._testShapeInference(model, (2, 3), (2, 4))
@test_util.run_in_graph_and_eager_modes()
def testNoneInShape(self):
class Model(keras.Model):
def __init__(self):
super(Model, self).__init__()
self.conv1 = keras.layers.Conv2D(8, 3)
self.pool = keras.layers.GlobalAveragePooling2D()
self.fc = keras.layers.Dense(3)
def call(self, x):
x = self.conv1(x)
x = self.pool(x)
x = self.fc(x)
return x
model = Model()
model.build(tensor_shape.TensorShape((None, None, None, 1)))
self.assertTrue(model.built, 'Model should be built')
self.assertTrue(model.weights,
'Model should have its weights created as it '
'has been built')
sample_input = array_ops.ones((1, 10, 10, 1))
output = model(sample_input)
self.assertEqual(output.shape, (1, 3))
@test_util.run_in_graph_and_eager_modes()
def testNoneInShapeWithCompoundModel(self):
class BasicBlock(keras.Model):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = keras.layers.Conv2D(8, 3)
self.pool = keras.layers.GlobalAveragePooling2D()
self.dense = keras.layers.Dense(3)
def call(self, x):
x = self.conv1(x)
x = self.pool(x)
x = self.dense(x)
return x
class CompoundModel(keras.Model):
def __init__(self):
super(CompoundModel, self).__init__()
self.block = BasicBlock()
def call(self, x):
x = self.block(x) # pylint: disable=not-callable
return x
model = CompoundModel()
model.build(tensor_shape.TensorShape((None, None, None, 1)))
self.assertTrue(model.built, 'Model should be built')
self.assertTrue(model.weights,
'Model should have its weights created as it '
'has been built')
sample_input = array_ops.ones((1, 10, 10, 1))
output = model(sample_input) # pylint: disable=not-callable
self.assertEqual(output.shape, (1, 3))
@test_util.run_in_graph_and_eager_modes()
def testNoneInShapeWithFunctionalAPI(self):
class BasicBlock(keras.Model):
# Inheriting from keras.Model; this block is called like a layer
# inside a model created using the functional API.
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = keras.layers.Conv2D(8, 3)
def call(self, x):
x = self.conv1(x)
return x
input_layer = keras.layers.Input(shape=(None, None, 1))
x = BasicBlock()(input_layer)
x = keras.layers.GlobalAveragePooling2D()(x)
output_layer = keras.layers.Dense(3)(x)
model = keras.Model(inputs=input_layer, outputs=output_layer)
model.build(tensor_shape.TensorShape((None, None, None, 1)))
self.assertTrue(model.built, 'Model should be built')
self.assertTrue(model.weights,
'Model should have its weights created as it '
'has been built')
sample_input = array_ops.ones((1, 10, 10, 1))
output = model(sample_input)
self.assertEqual(output.shape, (1, 3))
@keras_parameterized.run_all_keras_modes
def test_sequential_as_downstream_of_masking_layer(self):
inputs = keras.layers.Input(shape=(3, 4))
x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)
s = keras.Sequential()
s.add(keras.layers.Dense(5, input_shape=(4,)))
x = keras.layers.wrappers.TimeDistributed(s)(x)
model = keras.Model(inputs=inputs, outputs=x)
model.compile(
optimizer=rmsprop.RMSPropOptimizer(1e-3),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model_input = np.random.randint(
low=1, high=5, size=(10, 3, 4)).astype('float32')
for i in range(4):
model_input[i, i:, :] = 0.
model.fit(model_input,
np.random.random((10, 3, 5)), epochs=1, batch_size=6)
if not context.executing_eagerly():
# Note: this doesn't work in eager due to DeferredTensor/ops compatibility
# issue.
mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]
mask_outputs += [model.layers[2].compute_mask(
model.layers[2].input, mask_outputs[-1])]
func = keras.backend.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))
self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))
class GraphUtilsTest(test.TestCase):
@test_util.run_deprecated_v1
def testGetReachableFromInputs(self):
with self.cached_session():
pl_1 = array_ops.placeholder(shape=None, dtype='float32')
pl_2 = array_ops.placeholder(shape=None, dtype='float32')
pl_3 = array_ops.placeholder(shape=None, dtype='float32')
x_1 = pl_1 + pl_2
x_2 = pl_2 * 2
x_3 = pl_3 + 1
x_4 = x_1 + x_2
x_5 = x_3 * pl_1
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([pl_1]),
{pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([pl_1, pl_2]),
{pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([pl_3]),
{pl_3, x_3, x_5, x_3.op, x_5.op})
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([x_3]),
{x_3, x_5, x_5.op})
if __name__ == '__main__':
test.main()
|
apache-2.0
|
lincolnloop/django-categories
|
categories/south_migrations/0010_add_field_categoryrelation_category.py
|
14
|
4801
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Category.parent'
db.alter_column('categories_category', 'parent_id', self.gf('mptt.fields.TreeForeignKey')(null=True, to=orm['categories.Category']))
# Changing field 'Category.order'
db.alter_column('categories_category', 'order', self.gf('django.db.models.fields.IntegerField')())
# Adding field 'CategoryRelation.category'
db.add_column('categories_categoryrelation', 'category', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['categories.Category']))
def backwards(self, orm):
# Changing field 'Category.parent'
db.alter_column('categories_category', 'parent_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['categories.Category']))
# Changing field 'Category.order'
db.alter_column('categories_category', 'order', self.gf('django.db.models.fields.IntegerField')(null=True))
# Deleting field 'CategoryRelation.category'
db.delete_column('categories_categoryrelation', 'category_id')
models = {
'categories.category': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alternate_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'alternate_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'meta_extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['categories.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'categories.categoryrelation': {
'Meta': {'object_name': 'CategoryRelation'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'new_cats'", 'null': 'True', 'to': "orm['categories.Category']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'relation_type': ('django.db.models.fields.CharField', [], {'max_length': "'200'", 'null': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['categories']
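# Illustrative aside (not part of the original South migration): the forwards step
# that adds CategoryRelation.category, expressed as a modern django.db.migrations
# operation for comparison. The dependency name '0009_auto' is a hypothetical
# placeholder for the preceding migration.
from django.db import migrations, models


class ModernMigrationSketch(migrations.Migration):
    dependencies = [('categories', '0009_auto')]
    operations = [
        migrations.AddField(
            model_name='categoryrelation',
            name='category',
            field=models.ForeignKey('categories.Category', null=True,
                                    on_delete=models.CASCADE),
        ),
    ]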
|
apache-2.0
|
zederson/Arduino
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/packages/ordered_dict.py
|
1093
|
8936
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
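# Illustrative usage sketch (not part of the original backport): the behaviours the
# comments above describe, shown as a few assertions. The same calls work with the
# standard library's collections.OrderedDict.
if __name__ == '__main__':
    od = OrderedDict()
    od['a'] = 1
    od['b'] = 2
    od['c'] = 3
    assert list(od) == ['a', 'b', 'c']           # insertion order is preserved
    assert od.popitem(last=False) == ('a', 1)    # FIFO removal when last=False
    assert od == OrderedDict([('b', 2), ('c', 3)])
    assert od != OrderedDict([('c', 3), ('b', 2)])  # order-sensitive vs another OD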
|
lgpl-2.1
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/scipy/special/tests/test_basic.py
|
17
|
132165
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, dec, TestCase, run_module_suite, assert_allclose,
assert_raises, assert_array_almost_equal_nulp)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk, zeta
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
from scipy._lib._version import NumpyVersion
import math
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes._gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
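        # diric(x, n) = sin(n*x/2) / (n*sin(x/2)); at x = 2*pi the removable
        # singularity has the limit (-1)**(n+1), i.e. +1 for odd n and -1 for
        # even n, which is what the assertions below check.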
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
assert_equal(cephes.expm1(np.inf), np.inf)
assert_equal(cephes.expm1(-np.inf), -1)
assert_equal(cephes.expm1(np.nan), np.nan)
    # Earlier numpy versions don't guarantee that npy_cexp conforms to C99.
@dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
def test_expm1_complex(self):
expm1 = cephes.expm1
assert_equal(expm1(0 + 0j), 0 + 0j)
assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
@dec.knownfailureif(True, 'The real part of expm1(z) bad at these points')
def test_expm1_complex_hard(self):
# The real part of this function is difficult to evaluate when
# z.real = -log(cos(z.imag)).
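        # Sketch of why: with z = x + 1j*y, Re(expm1(z)) = exp(x)*cos(y) - 1,
        # which is nearly zero exactly when x = -log(cos(y)), so a naive
        # evaluation loses almost all significant digits to cancellation.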
y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
x = -np.log(np.cos(y))
z = x + 1j*y
# evaluate using mpmath.expm1 with dps=1000
expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
2.4289354732893695e-18+0.20271003550867248j,
4.5235500262585768e-17+0.30933624960962319j,
7.8234305217489006e-17-3.3805150062465863j,
-1.3685191953697676e-16-225.95084645419513j,
8.7175620481291045e-17+2.2371609442247422j])
found = cephes.expm1(z)
# this passes.
assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
# this fails.
assert_array_almost_equal_nulp(found.real, expected.real, 20)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes._gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtr_inf(self):
assert_equal(cephes.gdtr(1,1,np.inf),1.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
log1p = cephes.log1p
assert_equal(log1p(0), 0.0)
assert_equal(log1p(-1), -np.inf)
assert_equal(log1p(-2), np.nan)
assert_equal(log1p(np.inf), np.inf)
    # Earlier numpy versions don't guarantee that npy_clog conforms to C99.
@dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
def test_log1p_complex(self):
log1p = cephes.log1p
c = complex
assert_equal(log1p(0 + 0j), 0 + 0j)
assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
        # Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
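        # pdtrik inverts the Poisson CDF in k; since pdtr(k, m) equals
        # gammaincc(k + 1, m), applying gammaincc to the result should
        # recover the target probability.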
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
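        # Limits: Si(x) -> +/-pi/2 as x -> +/-inf, and Ci(x) -> 0 as
        # x -> +inf; the real-valued Ci is undefined at -inf, hence nan.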
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
assert_allclose(zeta(2,2), pi**2/6 - 1, rtol=1e-12)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_zeta_1arg(self):
assert_allclose(zeta(2), pi**2/6, rtol=1e-12)
assert_allclose(zeta(4), pi**4/90, rtol=1e-12)
def test_wofz(self):
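        # wofz is the Faddeeva function w(z) = exp(-z**2) * erfc(-1j*z);
        # the reference values below are high-precision evaluations at
        # assorted points in the complex plane.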
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
        # This tests the airy function to 8 decimal places of accuracy.
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
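        # For large negative arguments Ai oscillates with amplitude roughly
        # |z|**(-1/4) and Ai' with amplitude roughly |z|**(1/4) (up to a
        # constant factor), so dividing by these envelopes makes the
        # "close to zero" checks below scale-free.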
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
                                               # table from 1927 had 3.77320
                                               # but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
special.comb(20, list(range(21))), atol=1e-15)
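        # C(n, n-1) == n; taking n just past the native integer maximum
        # exercises the arbitrary-precision (exact=True) code path.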
ii = np.iinfo(int).max + 1
assert_equal(special.comb(ii, ii-1, exact=True), ii)
expected = 100891344545564193334812497256
assert_equal(special.comb(100, 50, exact=True), expected)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
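        # Closed form used below: F(phi | m=1) = arcsinh(tan(phi))
        # (equivalently atanh(sin(phi))), which diverges as phi -> pi/2.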
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
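        # Compare func against a reference expression on random points; the
        # heavy-tailed Pareto(0.02) draws make |z| span many orders of
        # magnitude, and the signs are randomized.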
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparison
# of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
        b = 1-a  # a is the current state; 1-a inverts it
        c = special.errprint(b)  # returns the previous state, i.e. 'a'
        assert_equal(a,c)
        d = special.errprint(a)  # restore the original state
        assert_equal(d,b)  # errprint returns the state that was set above (b)
# assert_equal(d,1-a)
def test_erf_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -1, 1]
assert_allclose(special.erf(vals), expected, rtol=1e-15)
def test_erfc_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, 2, 0]
assert_allclose(special.erfc(vals), expected, rtol=1e-15)
def test_erfcx_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, np.inf, 0]
assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
def test_erfi_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -np.inf, np.inf]
assert_allclose(special.erfi(vals), expected, rtol=1e-15)
def test_dawsn_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -0.0, 0.0]
assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
def test_wofz_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
        eu2 = special.euler(2)  # just checking that this does not segfault
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
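        # MathWorld tabulates |E_{2k}|; Euler numbers alternate in sign
        # (E_0 = 1, E_2 = -1, E_4 = 5, ...), hence the sign flip for odd k.
        # Odd-index Euler numbers are zero, so those entries of `correct`
        # keep their initial value of 0.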
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
# Some known values, float math
assert_array_almost_equal(special.factorial(0), 1)
assert_array_almost_equal(special.factorial(1), 1)
assert_array_almost_equal(special.factorial(2), 2)
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
[[120, 6], [24, 6]])
# Some known values, integer math
assert_equal(special.factorial(0, exact=True), 1)
assert_equal(special.factorial(1, exact=True), 1)
assert_equal(special.factorial(2, exact=True), 2)
assert_equal(special.factorial(5, exact=True), 120)
assert_equal(special.factorial(15, exact=True), 1307674368000)
# ndarray shape is maintained
assert_equal(special.factorial([7, 4, 15, 10], exact=True),
[5040, 24, 1307674368000, 3628800])
assert_equal(special.factorial([[5, 3], [4, 3]], True),
[[120, 6], [24, 6]])
# object arrays
assert_equal(special.factorial(np.arange(-3, 22), True),
special.factorial(np.arange(-3, 22), False))
# int64 array
assert_equal(special.factorial(np.arange(-3, 15), True),
special.factorial(np.arange(-3, 15), False))
# int32 array
assert_equal(special.factorial(np.arange(-3, 5), True),
special.factorial(np.arange(-3, 5), False))
# Consistent output for n < 0
for exact in (True, False):
assert_array_equal(0, special.factorial(-3, exact))
assert_array_equal([1, 2, 0, 0],
special.factorial([1, 2, -5, -4], exact))
for n in range(0, 22):
# Compare all with math.factorial
correct = math.factorial(n)
assert_array_equal(correct, special.factorial(n, True))
assert_array_equal(correct, special.factorial([n], True)[0])
assert_allclose(float(correct), special.factorial(n, False))
assert_allclose(float(correct), special.factorial([n], False)[0])
# Compare exact=True vs False, scalar vs array
assert_array_equal(special.factorial(n, True),
special.factorial(n, False))
assert_array_equal(special.factorial([n], True),
special.factorial([n], False))
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
def test_fresnel_inf1(self):
frs = special.fresnel(np.inf)
assert_equal(frs, (0.5, 0.5))
def test_fresnel_inf2(self):
frs = special.fresnel(-np.inf)
assert_equal(frs, (-0.5, -0.5))
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincinf(self):
gama = special.gammainc(0.5, np.inf)
assert_equal(gama,1.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinf(self):
gama = special.gammaincc(0.5,np.inf)
assert_equal(gama,0.0)
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
def test_hankl2e(self):
hank2e = special.hankel2e(1,.1)
hankrl2e = special.hankel2e(1,.1)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp0f1_gh5764(self):
# Just checks the point that failed; there's a more systematic
# test in test_mpmath
res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
# The expected value was generated using mpmath
assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f1_gh2282(self):
hyp = special.hyp1f1(0.5, 1.5, -1000)
assert_almost_equal(hyp, 0.028024956081989643, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
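# each row below is (a, b, c, x, expected); most expected values come from
# closed-form 2F1 identities in Abramowitz & Stegun (AMS 55), chapter 15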
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*np.random.random() - 1
b = 5*np.random.random() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
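# Compare the real-argument (Cephes) value f1(v, z) against the complex-path
# (AMOS) value f1(v, z+0j) and, for integer v, the integer-order routine
# f2(int(v), z): an inf from the real path must pair with a huge complex
# magnitude, and a nan with a genuinely complex (nonzero imaginary) result.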
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
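# Ascending series for I_v(z): sum_k (z/2)**(v+2k) / (k! * Gamma(v+k+1)),
# with each term evaluated in log space; `err` is a rough rounding-error
# bound, not a truncation bound.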
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*np.random.random() - 0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
# Q not defined; broken, and cannot figure out the proper reporting order
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
pbv = special.pbdv(1,.2)
derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
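# z above encodes the closed form D_v(0) = 2**(v/2)*sqrt(pi)/Gamma((1 - v)/2)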
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # correct derivative value
assert_almost_equal(sy1,-377.52483,5) # previous values in the system
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
assert_almost_equal(sy3,sphpy,4) # compare with the correct derivative value (correct = -system value)
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_ch2_inf():
assert_equal(special.chdtr(0.7,np.inf), 1.0)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
|
mit
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/pyRiemann-0.2.2/pyriemann/utils/mean.py
|
2
|
5426
|
import numpy
from .base import sqrtm, invsqrtm, powm, logm, expm
###############################################################
# Means
###############################################################
def mean_riemann(covmats, tol=10e-9, maxiter=50, init=None):
"""Return the mean covariance matrix according to the Riemannian metric.
The procedure is similar to a gradient descent minimizing the sum of
squared Riemannian distances to the mean.
.. math::
\mathbf{C} = \\arg\min{(\sum_i \delta_R ( \mathbf{C} , \mathbf{C}_i)^2)}
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the gradient descent. If None the Arithmetic mean is used
:returns: the mean covariance matrix
"""
# init
Nt, Ne, Ne = covmats.shape
if init is None:
C = numpy.mean(covmats, axis=0)
else:
C = init
k = 0
nu = 1.0
tau = numpy.finfo(numpy.float64).max
crit = numpy.finfo(numpy.float64).max
# stop when the gradient norm or the step size drops below tol, or when maxiter is reached
while (crit > tol) and (k < maxiter) and (nu > tol):
k = k + 1
C12 = sqrtm(C)
Cm12 = invsqrtm(C)
T = numpy.zeros((Ne, Ne))
for index in range(Nt):
tmp = numpy.dot(numpy.dot(Cm12, covmats[index, :, :]), Cm12)
T += logm(numpy.matrix(tmp))
#J = mean(T,axis=0)
J = T / Nt
crit = numpy.linalg.norm(J, ord='fro')
h = nu * crit
C = numpy.matrix(C12 * expm(nu * J) * C12)
if h < tau:
nu = 0.95 * nu
tau = h
else:
nu = 0.5 * nu
return C
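# Illustrative sketch, not part of the original pyRiemann module: the helper
# below (hypothetical name) builds a small random SPD set and returns its
# Riemannian mean, showing the expected (Ntrials, Ne, Ne) input layout.
def _example_mean_riemann(Ntrials=5, Ne=4, seed=0):
    rng = numpy.random.RandomState(seed)
    covmats = numpy.zeros((Ntrials, Ne, Ne))
    for i in range(Ntrials):
        A = rng.randn(Ne, Ne)
        # A.dot(A.T) + Ne*I is symmetric positive definite
        covmats[i] = numpy.dot(A, A.T) + Ne * numpy.eye(Ne)
    return mean_riemann(covmats, tol=1e-8, maxiter=50)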
def mean_logeuclid(covmats):
"""Return the mean covariance matrix according to the log-euclidean metric :
.. math::
\mathbf{C} = \exp{(\\frac{1}{N} \sum_i \log{\mathbf{C}_i})}
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:returns: the mean covariance matrix
"""
Nt, Ne, Ne = covmats.shape
T = numpy.zeros((Ne, Ne))
for index in range(Nt):
T += logm(numpy.matrix(covmats[index, :, :]))
C = expm(T / Nt)
return C
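# Illustrative sketch, not part of the original module: for diagonal (hence
# commuting) SPD matrices the log-Euclidean mean is the element-wise
# geometric mean of the diagonals, a quick sanity check of the formula
# above. The helper name is hypothetical.
def _example_mean_logeuclid_diagonal():
    covmats = numpy.array([numpy.diag([1.0, 4.0]),
                           numpy.diag([4.0, 16.0])])
    # expected diagonal: (sqrt(1*4), sqrt(4*16)) = (2, 8)
    return mean_logeuclid(covmats), numpy.diag([2.0, 8.0])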
def mean_logdet(covmats, tol=10e-5, maxiter=50, init=None):
"""Return the mean covariance matrix according to the logdet metric.
This is an iterative procedure where the update is:
.. math::
\mathbf{C} = \left(\sum_i \left( 0.5 \mathbf{C} + 0.5 \mathbf{C}_i \\right)^{-1} \\right)^{-1}
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:returns: the mean covariance matrix
"""
Nt, Ne, Ne = covmats.shape
if init is None:
C = numpy.mean(covmats, axis=0)
else:
C = init
k = 0
crit = numpy.finfo(numpy.float64).max
# stop when the change between iterations drops below tol or maxiter is reached
while (crit > tol) and (k < maxiter):
k = k + 1
J = numpy.zeros((Ne, Ne))
for Ci in covmats:
J += numpy.linalg.inv(0.5 * Ci + 0.5 * C)
J = J / Nt
Cnew = numpy.linalg.inv(J)
crit = numpy.linalg.norm(Cnew - C, ord='fro')
C = Cnew
if k == maxiter:
print('Max iter reached')
return C
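# Illustrative sketch, not part of the original module: a set of identical
# matrices is a fixed point of the logdet iteration above (J = inv(C), so
# inv(J) = C after the first pass). The helper name is hypothetical.
def _example_mean_logdet_fixed_point():
    C0 = numpy.array([[2.0, 0.5], [0.5, 1.0]])
    covmats = numpy.array([C0, C0, C0])
    # should return a matrix numerically equal to C0
    return mean_logdet(covmats), C0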
def mean_euclid(covmats):
"""Return the mean covariance matrix according to the euclidean metric :
.. math::
\mathbf{C} = \\frac{1}{N} \sum_i \mathbf{C}_i
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:returns: the mean covariance matrix
"""
return numpy.mean(covmats, axis=0)
def mean_ale(covmats):
"""Return the mean covariance matrix according using the ALE algorithme
described in :
M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint Diagonalization and Geometric Mean of Symmetric Positive Definite Matrices', PLoS ONE, 2015
The implementation is not done yet.
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:returns: the mean covariance matrix
"""
raise NotImplementedError
def mean_identity(covmats):
"""Return the identity matrix corresponding to the covmats sit size
.. math::
\mathbf{C} = \mathbf{I}_d
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:returns: the identity matrix of size Nchannels
"""
C = numpy.eye(covmats.shape[1])
return C
def mean_covariance(covmats, metric='riemann', *args):
"""Return the mean covariance matrix according to the metric
:param covmats: Covariance matrices set, Ntrials X Nchannels X Nchannels
:param metric: the metric (default 'riemann'); can be 'riemann', 'logeuclid', 'euclid', 'logdet', or 'identity'
:param args: extra arguments passed to the selected sub-function
:returns: the mean covariance matrix
"""
options = {'riemann': mean_riemann,
'logeuclid': mean_logeuclid,
'euclid': mean_euclid,
'identity': mean_identity,
'logdet': mean_logdet}
C = options[metric](covmats, *args)
return C
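# Illustrative sketch, not part of the original module: `mean_covariance`
# dispatches on the `metric` string and forwards extra positional arguments
# to the selected sub-function. The helper name is hypothetical.
def _example_mean_covariance(covmats):
    # equivalent to mean_riemann(covmats)
    C_riemann = mean_covariance(covmats, metric='riemann')
    # extra positional arguments are forwarded; here tol=1e-6, maxiter=100
    C_logdet = mean_covariance(covmats, 'logdet', 1e-6, 100)
    return C_riemann, C_logdet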
|
bsd-3-clause
|
petabyte/bedrock
|
bedrock/thunderbird/urls.py
|
5
|
2312
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import patterns, url
import views
import bedrock.releasenotes.views
from bedrock.releasenotes import version_re
from bedrock.mozorg.util import page
latest_re = r'^thunderbird(?:/(?P<version>%s))?/%s/$'
thunderbird_releasenotes_re = latest_re % (version_re, r'releasenotes')
sysreq_re = latest_re % (version_re, 'system-requirements')
channel_re = '(?P<channel>beta|earlybird)'
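# Illustrative note, not in the original file: with a version pattern such as
# r'\d+\.\d+(?:\.\d+)?', thunderbird_releasenotes_re above expands to roughly
#   ^thunderbird(?:/(?P<version>\d+\.\d+(?:\.\d+)?))?/releasenotes/$
# so both /thunderbird/releasenotes/ and /thunderbird/38.0/releasenotes/ match;
# sysreq_re has the same shape with 'system-requirements' as the last segment.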
urlpatterns = patterns('',
url(r'^thunderbird/(?:%s/)?all/$' % channel_re,
views.all_downloads, name='thunderbird.all'),
url('^thunderbird/releases/$', bedrock.releasenotes.views.releases_index,
{'product': 'Thunderbird'}, name='thunderbird.releases.index'),
url(thunderbird_releasenotes_re, bedrock.releasenotes.views.release_notes,
{'product': 'Thunderbird'}, name='thunderbird.releasenotes'),
url(sysreq_re, bedrock.releasenotes.views.system_requirements,
{'product': 'Thunderbird'}, name='thunderbird.system_requirements'),
url('^thunderbird/latest/system-requirements/$',
bedrock.releasenotes.views.latest_sysreq,
{'product': 'thunderbird', 'channel': 'release'}, name='thunderbird.sysreq'),
url('^thunderbird/latest/releasenotes/$',
bedrock.releasenotes.views.latest_notes,
{'product': 'thunderbird'}, name='thunderbird.notes'),
page('thunderbird', 'thunderbird/index.html'),
page('thunderbird/features', 'thunderbird/features.html'),
page('thunderbird/email-providers', 'thunderbird/email-providers.html'),
page('thunderbird/organizations', 'thunderbird/organizations.html'),
# Start pages by channel
page('thunderbird/release/start', 'thunderbird/start/release.html'),
page('thunderbird/beta/start', 'thunderbird/start/beta.html'),
page('thunderbird/earlybird/start', 'thunderbird/start/earlybird.html'),
page('thunderbird/nightly/start', 'thunderbird/start/daily.html'),
# What's New pages by channel
page('thunderbird/earlybird/whatsnew', 'thunderbird/whatsnew/earlybird.html'),
page('thunderbird/nightly/whatsnew', 'thunderbird/whatsnew/daily.html'),
)
|
mpl-2.0
|
incaser/odoo-odoo
|
addons/base_gengo/__openerp__.py
|
312
|
2117
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Translations through Gengo API',
'version': '0.1',
'category': 'Tools',
'description': """
Automated Translations through Gengo API
========================================
This module installs a passive scheduler job for automated translations
using the Gengo API. To activate it, you must:
1) Configure your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`
2) Launch the wizard under `Settings > Application Terms > Gengo: Manual Request of Translation` and follow its steps.
The wizard activates the cron job and the scheduler, and starts automatic translation via the Gengo service for all the terms for which you requested it.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base'],
'data': [
'gengo_sync_schedular_data.xml',
'ir_translation.xml',
'res_company_view.xml',
'wizard/base_gengo_translations_view.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ghjm/ansible
|
hacking/build-ansible.py
|
21
|
2905
|
#!/usr/bin/env python3
# coding: utf-8
# PYTHON_ARGCOMPLETE_OK
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import sys
from straight.plugin import load
try:
import argcomplete
except ImportError:
argcomplete = None
def build_lib_path(this_script=__file__):
"""Return path to the common build library directory."""
hacking_dir = os.path.dirname(this_script)
libdir = os.path.abspath(os.path.join(hacking_dir, 'build_library'))
return libdir
def ansible_lib_path(this_script=__file__):
"""Return path to the common build library directory."""
hacking_dir = os.path.dirname(this_script)
libdir = os.path.abspath(os.path.join(hacking_dir, '..', 'lib'))
return libdir
sys.path.insert(0, ansible_lib_path())
sys.path.insert(0, build_lib_path())
from build_ansible import commands, errors
def create_arg_parser(program_name):
"""
Creates a command line argument parser
:arg program_name: The name of the script. Used in help texts
"""
parser = argparse.ArgumentParser(prog=program_name,
description="Implements utilities to build Ansible")
return parser
def main():
"""
Start our run.
"It all starts here"
"""
subcommands = load('build_ansible.command_plugins', subclasses=commands.Command)
arg_parser = create_arg_parser(os.path.basename(sys.argv[0]))
arg_parser.add_argument('--debug', dest='debug', required=False, default=False,
action='store_true',
help='Show tracebacks and other debugging information')
subparsers = arg_parser.add_subparsers(title='Subcommands', dest='command',
help='for help use build-ansible.py SUBCOMMANDS -h')
subcommands.pipe('init_parser', subparsers.add_parser)
if argcomplete:
argcomplete.autocomplete(arg_parser)
args = arg_parser.parse_args(sys.argv[1:])
if args.command is None:
print('Please specify a subcommand to run')
sys.exit(1)
for subcommand in subcommands:
if subcommand.name == args.command:
command = subcommand
break
else:
# Note: We should never trigger this because argparse should shield us from it
print('Error: {0} was not a recognized subcommand'.format(args.command))
sys.exit(1)
try:
retval = command.main(args)
except (errors.DependencyError, errors.MissingUserInput, errors.InvalidUserInput) as e:
print(e)
if args.debug:
raise
sys.exit(2)
sys.exit(retval)
if __name__ == '__main__':
main()
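# Illustrative usage note, not part of the original script: subcommands are
# discovered as straight.plugin classes under build_ansible.command_plugins,
# so for a hypothetical plugin named 'docs-build' an invocation looks like
#   hacking/build-ansible.py docs-build --help
# and argparse rejects subcommand names that no discovered plugin provides.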
|
gpl-3.0
|
karan/warehouse
|
tests/unit/accounts/test_services.py
|
2
|
6915
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from zope.interface.verify import verifyClass
from warehouse.accounts import services
from warehouse.accounts.interfaces import IUserService
from ...common.db.accounts import UserFactory, EmailFactory
class TestDatabaseUserService:
def test_verify_service(self):
assert verifyClass(IUserService, services.DatabaseUserService)
def test_service_creation(self, monkeypatch):
crypt_context_obj = pretend.stub()
crypt_context_cls = pretend.call_recorder(
lambda schemes, deprecated: crypt_context_obj
)
monkeypatch.setattr(services, "CryptContext", crypt_context_cls)
session = pretend.stub()
service = services.DatabaseUserService(session)
assert service.db is session
assert service.hasher is crypt_context_obj
assert crypt_context_cls.calls == [
pretend.call(
schemes=[
"bcrypt_sha256",
"bcrypt",
"django_bcrypt",
"unix_disabled",
],
deprecated=["auto"],
),
]
def test_find_userid_nonexistant_user(self, db_session):
service = services.DatabaseUserService(db_session)
assert service.find_userid("my_username") is None
def test_find_userid_existing_user(self, db_session):
user = UserFactory.create()
service = services.DatabaseUserService(db_session)
assert service.find_userid(user.username) == user.id
def test_check_password_nonexistant_user(self, db_session):
service = services.DatabaseUserService(db_session)
assert not service.check_password(1, None)
def test_check_password_invalid(self, db_session):
user = UserFactory.create()
service = services.DatabaseUserService(db_session)
service.hasher = pretend.stub(
verify_and_update=pretend.call_recorder(
lambda l, r: (False, None)
),
)
assert not service.check_password(user.id, "user password")
assert service.hasher.verify_and_update.calls == [
pretend.call("user password", user.password),
]
def test_check_password_valid(self, db_session):
user = UserFactory.create()
service = services.DatabaseUserService(db_session)
service.hasher = pretend.stub(
verify_and_update=pretend.call_recorder(lambda l, r: (True, None)),
)
assert service.check_password(user.id, "user password")
assert service.hasher.verify_and_update.calls == [
pretend.call("user password", user.password),
]
def test_check_password_updates(self, db_session):
user = UserFactory.create()
password = user.password
service = services.DatabaseUserService(db_session)
service.hasher = pretend.stub(
verify_and_update=pretend.call_recorder(
lambda l, r: (True, "new password")
),
)
assert service.check_password(user.id, "user password")
assert service.hasher.verify_and_update.calls == [
pretend.call("user password", password),
]
assert user.password == "new password"
def test_create_user(self, db_session):
user = UserFactory.build()
email = "[email protected]"
service = services.DatabaseUserService(db_session)
new_user = service.create_user(username=user.username,
name=user.name,
password=user.password,
email=email)
db_session.flush()
user_from_db = service.get_user(new_user.id)
assert user_from_db.username == user.username
assert user_from_db.name == user.name
assert user_from_db.email == email
def test_update_user(self, db_session):
user = UserFactory.create()
service = services.DatabaseUserService(db_session)
new_name = "new username"
service.update_user(user.id, username=new_name)
user_from_db = service.get_user(user.id)
assert user_from_db.username == user.username
def test_verify_email(self, db_session):
service = services.DatabaseUserService(db_session)
user = UserFactory.create()
EmailFactory.create(user=user, primary=True,
verified=False)
EmailFactory.create(user=user, primary=False,
verified=False)
service.verify_email(user.id, user.emails[0].email)
assert user.emails[0].verified
assert not user.emails[1].verified
def test_find_by_email(self, db_session):
service = services.DatabaseUserService(db_session)
user = UserFactory.create()
EmailFactory.create(user=user, primary=True, verified=False)
found_userid = service.find_userid_by_email(user.emails[0].email)
db_session.flush()
assert user.id == found_userid
def test_find_by_email_not_found(self, db_session):
service = services.DatabaseUserService(db_session)
assert service.find_userid_by_email("something") is None
def test_create_login_success(self, db_session):
service = services.DatabaseUserService(db_session)
user = service.create_user(
"test_user", "test_name", "test_password", "test_email")
assert user.id is not None
# now make sure that we can log in as that user
assert service.check_password(user.id, "test_password")
def test_create_login_error(self, db_session):
service = services.DatabaseUserService(db_session)
user = service.create_user(
"test_user", "test_name", "test_password", "test_email")
assert user.id is not None
assert not service.check_password(user.id, "bad_password")
def test_database_login_factory(monkeypatch):
service_obj = pretend.stub()
service_cls = pretend.call_recorder(lambda session: service_obj)
monkeypatch.setattr(services, "DatabaseUserService", service_cls)
context = pretend.stub()
request = pretend.stub(db=pretend.stub())
assert services.database_login_factory(context, request) is service_obj
assert service_cls.calls == [pretend.call(request.db)]
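# --- Editor's note (illustrative, not part of the original test module) -------
# The tests above replace ``service.hasher`` with a pretend stub.  For context,
# a minimal sketch of the passlib ``CryptContext`` behaviour being stubbed is
# shown below.  It only illustrates passlib's public API (assuming passlib >= 1.7
# and a bcrypt backend are installed); it is not warehouse code.
def _hasher_sketch():
    from passlib.context import CryptContext
    # A context similar to the one asserted in test_service_creation.
    ctx = CryptContext(schemes=["bcrypt_sha256", "bcrypt"], deprecated="auto")
    hashed = ctx.hash("user password")
    # verify_and_update returns (is_valid, new_hash_or_None); a non-None second
    # value is what lets check_password transparently re-hash old passwords.
    ok, new_hash = ctx.verify_and_update("user password", hashed)
    return ok, new_hash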
|
apache-2.0
|
glenngillen/dotfiles
|
.vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/parso/python/parser.py
|
2
|
8227
|
from parso.python import tree
from parso.python.token import PythonTokenTypes
from parso.parser import BaseParser
NAME = PythonTokenTypes.NAME
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
class Parser(BaseParser):
"""
    This class is used to parse a Python file and divide it into a
    class structure of different scopes.
:param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
"""
node_map = {
'expr_stmt': tree.ExprStmt,
'classdef': tree.Class,
'funcdef': tree.Function,
'file_input': tree.Module,
'import_name': tree.ImportName,
'import_from': tree.ImportFrom,
'break_stmt': tree.KeywordStatement,
'continue_stmt': tree.KeywordStatement,
'return_stmt': tree.ReturnStmt,
'raise_stmt': tree.KeywordStatement,
'yield_expr': tree.YieldExpr,
'del_stmt': tree.KeywordStatement,
'pass_stmt': tree.KeywordStatement,
'global_stmt': tree.GlobalStmt,
'nonlocal_stmt': tree.KeywordStatement,
'print_stmt': tree.KeywordStatement,
'assert_stmt': tree.AssertStmt,
'if_stmt': tree.IfStmt,
'with_stmt': tree.WithStmt,
'for_stmt': tree.ForStmt,
'while_stmt': tree.WhileStmt,
'try_stmt': tree.TryStmt,
'sync_comp_for': tree.SyncCompFor,
# Not sure if this is the best idea, but IMO it's the easiest way to
# avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
'decorator': tree.Decorator,
'lambdef': tree.Lambda,
'lambdef_nocond': tree.Lambda,
'namedexpr_test': tree.NamedExpr,
}
default_node = tree.PythonNode
# Names/Keywords are handled separately
_leaf_map = {
PythonTokenTypes.STRING: tree.String,
PythonTokenTypes.NUMBER: tree.Number,
PythonTokenTypes.NEWLINE: tree.Newline,
PythonTokenTypes.ENDMARKER: tree.EndMarker,
PythonTokenTypes.FSTRING_STRING: tree.FStringString,
PythonTokenTypes.FSTRING_START: tree.FStringStart,
PythonTokenTypes.FSTRING_END: tree.FStringEnd,
}
def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
super().__init__(pgen_grammar, start_nonterminal,
error_recovery=error_recovery)
self.syntax_errors = []
self._omit_dedent_list = []
self._indent_counter = 0
def parse(self, tokens):
if self._error_recovery:
if self._start_nonterminal != 'file_input':
raise NotImplementedError
tokens = self._recovery_tokenize(tokens)
return super().parse(tokens)
def convert_node(self, nonterminal, children):
"""
Convert raw node information to a PythonBaseNode instance.
This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
try:
node = self.node_map[nonterminal](children)
except KeyError:
if nonterminal == 'suite':
# We don't want the INDENT/DEDENT in our parser tree. Those
# leaves are just cancer. They are virtual leaves and not real
# ones and therefore have pseudo start/end positions and no
# prefixes. Just ignore them.
children = [children[0]] + children[2:-1]
node = self.default_node(nonterminal, children)
for c in children:
c.parent = node
return node
def convert_leaf(self, type, value, prefix, start_pos):
# print('leaf', repr(value), token.tok_name[type])
if type == NAME:
if value in self._pgen_grammar.reserved_syntax_strings:
return tree.Keyword(value, start_pos, prefix)
else:
return tree.Name(value, start_pos, prefix)
return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)
def error_recovery(self, token):
tos_nodes = self.stack[-1].nodes
if tos_nodes:
last_leaf = tos_nodes[-1].get_last_leaf()
else:
last_leaf = None
if self._start_nonterminal == 'file_input' and \
(token.type == PythonTokenTypes.ENDMARKER
or token.type == DEDENT and not last_leaf.value.endswith('\n')
and not last_leaf.value.endswith('\r')):
# In Python statements need to end with a newline. But since it's
# possible (and valid in Python) that there's no newline at the
# end of a file, we have to recover even if the user doesn't want
# error recovery.
if self.stack[-1].dfa.from_rule == 'simple_stmt':
try:
plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
except KeyError:
pass
else:
if plan.next_dfa.is_final and not plan.dfa_pushes:
# We are ignoring here that the newline would be
# required for a simple_stmt.
self.stack[-1].dfa = plan.next_dfa
self._add_token(token)
return
if not self._error_recovery:
return super().error_recovery(token)
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for until_index, stack_node in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
if stack_node.nonterminal == 'file_input':
break
elif stack_node.nonterminal == 'suite':
# In the case where we just have a newline we don't want to
# do error recovery here. In all other cases, we want to do
# error recovery.
if len(stack_node.nodes) != 1:
break
return until_index
until_index = current_suite(self.stack)
if self._stack_removal(until_index + 1):
self._add_token(token)
else:
typ, value, start_pos, prefix = token
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
self.stack[-1].nodes.append(error_leaf)
tos = self.stack[-1]
if tos.nonterminal == 'suite':
            # Need at least one statement in the suite. This happened with the
# error recovery above.
try:
tos.dfa = tos.dfa.arcs['stmt']
except KeyError:
# We're already in a final state.
pass
def _stack_removal(self, start_index):
all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]
if all_nodes:
node = tree.PythonErrorNode(all_nodes)
for n in all_nodes:
n.parent = node
self.stack[start_index - 1].nodes.append(node)
self.stack[start_index:] = []
return bool(all_nodes)
def _recovery_tokenize(self, tokens):
for token in tokens:
typ = token[0]
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
self._indent_counter -= 1
continue
self._indent_counter -= 1
elif typ == INDENT:
self._indent_counter += 1
yield token
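# --- Editor's note (illustrative usage, not part of parso itself) -------------
# The Parser above is normally driven through parso's public entry points
# rather than instantiated directly.  A minimal sketch, assuming the ``parso``
# package is installed:
if __name__ == "__main__":  # pragma: no cover - illustrative only
    import parso

    module = parso.parse("def f(x):\n    return x + 1\n")
    # The tree was built bottom-up through convert_node/convert_leaf above.
    print(module.children)                           # children of the file_input node
    print(next(module.iter_funcdefs()).name.value)   # -> "f"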
|
mit
|
sdopoku/flask-hello-world
|
venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.py
|
357
|
3833
|
from setuptools import Command
from setuptools.archive_util import unpack_archive
from distutils import log, dir_util
import os, shutil, pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name()+'.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
self.run_command('egg_info')
target = self.target
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink,(self.target,),"Removing "+self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(self.copytree, (),
"Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src,dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/','CVS/':
if src.startswith(skip) or '/'+skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp: return
filename,ext = os.path.splitext(self.target)
filename += '-nspkg.pth'; self.outputs.append(filename)
log.info("Installing %s",filename)
if not self.dry_run:
f = open(filename,'wt')
for pkg in nsp:
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
trailer = '\n'
if '.' in pkg:
trailer = (
"; m and setattr(sys.modules[%r], %r, m)\n"
% ('.'.join(pth[:-1]), pth[-1])
)
f.write(
"import sys,types,os; "
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
"*%(pth)r); "
"ie = os.path.exists(os.path.join(p,'__init__.py')); "
"m = not ie and "
"sys.modules.setdefault(%(pkg)r,types.ModuleType(%(pkg)r)); "
"mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
"(p not in mp) and mp.append(p)%(trailer)s"
% locals()
)
f.close()
def _get_all_ns_packages(self):
nsp = {}
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp['.'.join(pkg)] = 1
pkg.pop()
nsp=list(nsp)
nsp.sort() # set up shorter names first
return nsp
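# --- Editor's note (illustrative, not part of setuptools) ---------------------
# _get_all_ns_packages expands each declared namespace package into itself and
# all of its parents, so 'a.b.c' yields 'a', 'a.b' and 'a.b.c'; sorting ensures
# parent packages are written to the -nspkg.pth file first.  A standalone
# sketch of that expansion:
def _expand_namespace_packages(declared):
    seen = {}
    for pkg in declared:
        parts = pkg.split('.')
        while parts:
            seen['.'.join(parts)] = 1
            parts.pop()
    return sorted(seen)

# _expand_namespace_packages(['a.b.c']) -> ['a', 'a.b', 'a.b.c']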
|
gpl-2.0
|
hobinyoon/hadoop-1.1.2
|
src/contrib/hod/testing/main.py
|
182
|
2928
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re
myPath = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myPath)
testingDir = os.path.join(rootDirectory, "testing")
sys.path.append(rootDirectory)
from testing.lib import printSeparator, printLine
moduleList = []
allList = []
excludes = [
]
# Build a module list by scanning through all files in testingDir
for file in os.listdir(testingDir):
if(re.search(r".py$", file) and re.search(r"^test", file)):
# All .py files with names starting in 'test'
module = re.sub(r"^test","",file)
module = re.sub(r".py$","",module)
allList.append(module)
if module not in excludes:
moduleList.append(module)
printLine("All testcases - %s" % allList)
printLine("Excluding the testcases - %s" % excludes)
printLine("Executing the testcases - %s" % moduleList)
testsResult = 0
# Now import each of these modules and start calling the corresponding
#testSuite methods
for moduleBaseName in moduleList:
try:
module = "testing.test" + moduleBaseName
suiteCaller = "Run" + moduleBaseName + "Tests"
printSeparator()
printLine("Running %s" % suiteCaller)
# Import the corresponding test cases module
imported_module = __import__(module , fromlist=[suiteCaller] )
# Call the corresponding suite method now
testRes = getattr(imported_module, suiteCaller)()
testsResult = testsResult + testRes
printLine("Finished %s. TestSuite Result : %s\n" % \
(suiteCaller, testRes))
except ImportError, i:
# Failed to import a test module
printLine(i)
testsResult = testsResult + 1
pass
except AttributeError, n:
# Failed to get suiteCaller from a test module
printLine(n)
testsResult = testsResult + 1
pass
except Exception, e:
# Test module suiteCaller threw some exception
printLine("%s failed. \nReason : %s" % (suiteCaller, e))
printLine("Skipping %s" % suiteCaller)
testsResult = testsResult + 1
pass
if testsResult != 0:
printSeparator()
printLine("Total testcases with failure or error : %s" % testsResult)
sys.exit(testsResult)
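# --- Editor's note (illustrative, not part of the HOD test driver) ------------
# The loop above uses the standard dynamic-import pattern: __import__ with a
# non-empty fromlist returns the leaf module, and getattr then fetches the
# suite runner by its conventional "Run<Module>Tests" name.  A runnable
# standalone sketch of the same pattern, using a stdlib module for illustration:
def _dynamic_call(module_name, attr_name, *args):
    imported_module = __import__(module_name, fromlist=[attr_name])
    return getattr(imported_module, attr_name)(*args)

# _dynamic_call("os.path", "join", "a", "b") -> "a/b" on POSIX systems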
|
apache-2.0
|
h2oai/h2o-dev
|
h2o-py/tests/testdir_hdfs/pyunit_INTERNAL_HDFS_orc_parser.py
|
4
|
3281
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
#----------------------------------------------------------------------
# Purpose: This test exercises the ORC parser against HDFS by parsing multiple
# orc files collected by Tom K.
#----------------------------------------------------------------------
def hdfs_orc_parser():
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
hadoop_namenode_is_accessible = pyunit_utils.hadoop_namenode_is_accessible()
if hadoop_namenode_is_accessible:
numElements2Compare = 10
tol_time = 200
tol_numeric = 1e-5
hdfs_name_node = pyunit_utils.hadoop_namenode()
if pyunit_utils.cannaryHDFSTest(hdfs_name_node, "/datasets/orc_parser/orc/orc_split_elim.orc"):
print("Your hive-exec version is too old. Orc parser test {0} is "
"skipped.".format("pyunit_INTERNAL_HDFS_orc_parser.py"))
pass
else:
allOrcFiles = ["/datasets/orc_parser/orc/TestOrcFile.columnProjection.orc",
"/datasets/orc_parser/orc/bigint_single_col.orc",
"/datasets/orc_parser/orc/TestOrcFile.emptyFile.orc",
"/datasets/orc_parser/orc/bool_single_col.orc",
"/datasets/orc_parser/orc/demo-11-zlib.orc",
"/datasets/orc_parser/orc/TestOrcFile.testDate1900.orc",
"/datasets/orc_parser/orc/demo-12-zlib.orc",
"/datasets/orc_parser/orc/TestOrcFile.testDate2038.orc",
"/datasets/orc_parser/orc/double_single_col.orc",
"/datasets/orc_parser/orc/TestOrcFile.testMemoryManagementV11.orc",
"/datasets/orc_parser/orc/float_single_col.orc",
"/datasets/orc_parser/orc/TestOrcFile.testMemoryManagementV12.orc",
"/datasets/orc_parser/orc/int_single_col.orc",
"/datasets/orc_parser/orc/TestOrcFile.testPredicatePushdown.orc",
"/datasets/orc_parser/orc/nulls-at-end-snappy.orc",
"/datasets/orc_parser/orc/TestOrcFile.testSnappy.orc",
"/datasets/orc_parser/orc/orc_split_elim.orc",
"/datasets/orc_parser/orc/TestOrcFile.testStringAndBinaryStatistics.orc",
"/datasets/orc_parser/orc/TestOrcFile.testStripeLevelStats.orc",
"/datasets/orc_parser/orc/smallint_single_col.orc",
"/datasets/orc_parser/orc/string_single_col.orc",
"/datasets/orc_parser/orc/tinyint_single_col.orc",
"/datasets/orc_parser/orc/TestOrcFile.testWithoutIndex.orc"]
for fIndex in range(len(allOrcFiles)):
url_orc = "hdfs://{0}{1}".format(hdfs_name_node, allOrcFiles[fIndex])
tab_test = h2o.import_file(url_orc)
else:
raise EnvironmentError
if __name__ == "__main__":
pyunit_utils.standalone_test(hdfs_orc_parser)
else:
hdfs_orc_parser()
|
apache-2.0
|
kho0810/flaskr
|
lib/sqlalchemy/dialects/mysql/__init__.py
|
33
|
1171
|
# mysql/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
|
apache-2.0
|
filipefigcorreia/asaanalyzer
|
asaanalyser/common/model.py
|
1
|
37599
|
# -*- coding: utf-8 -*-
from sys import stdout
import sqlite3
import math
import csv
from datetime import timedelta
from util import parse_date, total_seconds, groupby
CONSIDER_ONLY_COMPLETED_TASKS = False # this is ok because nothing significant is missing from incomplete tasks
class ASADatabase(object):
def __init__(self, db_filename):
"""db_filename: the database filename to open with full or relative path"""
self.db_filename = db_filename
def _get_full_iterable_data(self):
conn = sqlite3.connect(self.db_filename)
c = conn.cursor()
stats_table_present = False
c.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND name='asa_accurate_analytics';")
if c.fetchone()[0] == 1:
stats_table_present = True
c.close()
if stats_table_present:
c = conn.cursor()
c.execute('SELECT id, resource_type, resource_id, operation, username, time_started, time_ended '
'FROM asa_accurate_analytics '
'WHERE NOT time_ended IS NULL')
result = c.fetchall()
c.close()
conn.close()
else:
result = []
return result
def get_full_dataset(self):
return ASADBDataSet(self._get_full_iterable_data())
class FakeASADatabase(object):
def __init__(self, csv_filename):
self.csv_filename = csv_filename
def get_full_dataset(self):
data = []
with open(self.csv_filename, 'rbU') as csvfile:
reader = csv.reader(csvfile)
reader.next() # skip the headers row
data = list(reader)
return ASADBDataSet(data)
class ResourceTypes:
wiki = "wiki"
search = "search"
asa_artifact = "asa_artifact"
asa_spec = "asa_spec"
asa_index = "index"
asa_search = "asa_search"
class Measurements:
wiki_view = 'wiki_view'
wiki_edit = 'wiki_edit'
search = 'search'
asa_artifact_view = 'asa_artifact_view'
asa_artifact_edit = 'asa_artifact_edit'
asa_index = 'index_view'
asa_search = 'asa_search_view'
pretty_names = {wiki_view: "Wiki Pages", search: "Search", asa_artifact_view: "ASA Artifacts", asa_index: "ASA Index"}
@classmethod
def to_list(cls):
return [cls.wiki_view, cls.wiki_edit, cls.search, cls.asa_artifact_view, cls.asa_artifact_edit, cls.asa_index, cls.asa_search]
@classmethod
def to_ids_list_that_matter(cls):
return [(0, cls.wiki_view), (2, cls.search), (3, cls.asa_artifact_view), (5, cls.asa_index)]
@classmethod
def name_to_pretty_name(cls, name):
return cls.pretty_names[name]
class ASAExperiment(object):
def __init__(self, name, groups, questionnaire_hypos, questionnaire_questions):
self.name = name
self.groups = groups
        assert(len(groups)==2) # For simplicity's sake. May eventually be made more generic.
self.questionnaire_hypos = questionnaire_hypos
self.questionnaire_questions = questionnaire_questions
def _get_groups_in_seconds(self, data_array, columns=(3,None)):
"""Gets dates in seconds for each of the experiment's groups"""
groups = []
for group in self.get_groups(data_array, columns):
group = [[total_seconds(time) for time in dimension] for dimension in group]
groups.append(group)
return groups
def get_groups(self, data_array, columns=(3,None), group_column=0):
"""Partitions the data in the specified columns by the specified groups"""
groups_names = [g.name for g in self.groups]
groups = []
for group_name in groups_names:
group = data_array[:, group_column] == group_name
group = data_array[group][:, columns[0]:columns[1]].transpose()
groups.append(group)
return groups
def get_questionnaire_hypothesis(self):
if self.questionnaire_hypos is None: return None
if len(self.questionnaire_hypos)==0: return []
return self.questionnaire_hypos[1]
def get_questionnaire_questions(self):
if self.questionnaire_questions is None: return None
if len(self.questionnaire_questions)==0: return []
return self.questionnaire_questions[1]
def run_tests(self):
"""Processes all the data, runs the statistical tests, and returns the results"""
import numpy as np
from stats import get_mww, get_ttest_equal_var, get_ttest_diff_var, get_levene, get_simple_stats, get_shapiro
calculations = []
headers_task_durations = ['group', 'user', 'task', 'duration']
headers_activity_times = ['group', 'user'] + Measurements.to_list()
tasks_data = ASADataSet(headers_task_durations, [])
tasks_data_secs = ASADataSet(headers_task_durations, [])
activities_data = ASADataSet(headers_activity_times, [])
activities_data_secs = ASADataSet(headers_activity_times, [])
stdout.write("Running tests for experiment '{0}'\n".format(self.name))
stdout.write("Loading tasks: ")
stdout.flush()
for group in self.groups:
tasks_rows = [(group.name,) + row for row in group.get_task_times()]
tasks_data.data.extend(tasks_rows)
tasks_data_secs.data.extend([row[0:3] + tuple(total_seconds(v) for v in row[3:]) for row in tasks_rows])
stdout.write(".")
stdout.flush()
calculations.append(("task_times", tasks_data))
calculations.append(("task_times_secs", tasks_data_secs))
stdout.write(" [finished]\n")
stdout.flush()
stdout.write("Loading activities: ")
stdout.flush()
for group in self.groups:
activities_rows = [[group.name] + row for row in group.get_activity_times()]
activities_data.data.extend(activities_rows)
activities_data_secs.data.extend([row[0:2] + [total_seconds(v) for v in row[2:]] for row in activities_rows])
stdout.write(".")
stdout.flush()
calculations.append(("activity_times", activities_data))
calculations.append(("activity_times_secs", activities_data_secs))
stdout.write(" [finished]\n")
stdout.flush()
###### Run statistical tests ######
stdout.write("Running statistical tests")
stdout.flush()
tasks_times_array = np.array(tasks_data.data)
activity_times_array = np.array(activities_data.data)
sstats_headers = ['group', 'sum', 'average', 'mean', 'median', 'std', 'var', 'count']
mww_headers = ['u', 'p_value_twotailed', 'p_value_lessthan', 'p_value_greaterthan']
ttest_headers = ['t_twotailed', 'p_value_twotailed', 't_lessthan', 'p_value_lessthan', 't_greaterthan', 'p_value_greaterthan', 'for_variance']
levene_headers = ['p_value', 'w']
shapiro_headers = ['group', 'activity', 'p_value', 'w']
### task tests
sstats_results = ASADataSet(['task'] + sstats_headers, [])
mww_results = ASADataSet(['task'] + mww_headers, [])
ttest_results = ASADataSet(['task'] + ttest_headers, [])
levene_results = ASADataSet(['task'] + levene_headers, [])
tasks_names = sorted(set(tasks_times_array[:, 2]))
for task_name in tasks_names:
task_array = tasks_times_array[tasks_times_array[:, 2] == task_name]
groups_data = self._get_groups_in_seconds(task_array)
group1_data = groups_data[0][0] # one "dimension" assumed, as task times imply only one column
group2_data = groups_data[1][0] # one "dimension" assumed, as task times imply only one column
sstats_results.data.append((task_name, self.groups[0].name) + get_simple_stats(group1_data))
sstats_results.data.append((task_name, self.groups[1].name) + get_simple_stats(group2_data))
mww_results.data.append((task_name,) + get_mww(group1_data, group2_data))
tasks_levene_result = get_levene(group1_data, group2_data)
levene_results.data.append((task_name,) + tasks_levene_result)
if tasks_levene_result[0] > 0.05: # equal variance
ttest_results.data.append((task_name,) + get_ttest_equal_var(group1_data, group2_data) + ("equal",))
else:
ttest_results.data.append((task_name,) + get_ttest_diff_var(group1_data, group2_data) + ("diff",))
calculations.append(("task_times_sstats", sstats_results))
calculations.append(("task_times_mww_test", mww_results))
calculations.append(("task_times_ttest_test", ttest_results))
calculations.append(("task_times_levene_test", levene_results))
#### totals task times tests
groups_data = self._get_groups_in_seconds(tasks_times_array)
group1_data = groups_data[0][0] # one "dimension" assumed, as task times imply only one column
group2_data = groups_data[1][0] # one "dimension" assumed, as task times imply only one column
calculations.append(("total_task_times_sstats", ASADataSet(sstats_headers, [(self.groups[0].name,) + get_simple_stats(group1_data), (self.groups[1].name,) + get_simple_stats(group2_data)])))
calculations.append(("total_task_times_mww_test", ASADataSet(mww_headers, [get_mww(group1_data, group2_data)])))
total_levene_result = [get_levene(group1_data, group2_data)]
calculations.append(("total_task_times_levene_test", ASADataSet(levene_headers, total_levene_result)))
        if total_levene_result[0][0] > 0.05: # equal variance; [0][0] is the p-value of the single Levene result
calculations.append(("total_task_times_ttest_test", ASADataSet(ttest_headers, [get_ttest_equal_var(group1_data, group2_data) + ("equal",)])))
else:
calculations.append(("total_task_times_ttest_test", ASADataSet(ttest_headers, [get_ttest_diff_var(group1_data, group2_data) + ("diff",)])))
#### totals task times tests per subject
### (i.e., times that subjects took working on the entirety of the tasks, rather than the times they took on each task)
from pandas import DataFrame
total_task_times = np.array(DataFrame([row[0:2] + row[3:] for row in tasks_times_array.tolist()]).groupby([0,1], as_index=False).aggregate(np.sum).to_records(index=False).tolist())
calculations.append(("total_task_times_persubject", ASADataSet(['group', 'user', 'duration'], total_task_times)))
groups_data = self._get_groups_in_seconds(total_task_times, columns=(2,3))
group1_data = groups_data[0][0] # one "dimension" assumed, as task times imply only one column
group2_data = groups_data[1][0] # one "dimension" assumed, as task times imply only one column
calculations.append(("total_task_times_persubject_sstats", ASADataSet(sstats_headers,
[(self.groups[0].name,) + get_simple_stats(group1_data), (self.groups[1].name,) + get_simple_stats(group2_data)])))
calculations.append(("total_task_times_persubject_mww_test", ASADataSet(mww_headers, [get_mww(group1_data, group2_data)])))
total_levene_result = [get_levene(group1_data, group2_data)]
calculations.append(("total_task_times_persubject_levene_test", ASADataSet(levene_headers, total_levene_result)))
        if total_levene_result[0][0] > 0.05: # equal variance; [0][0] is the p-value of the single Levene result
calculations.append(("total_task_times_persubject_ttest_test", ASADataSet(ttest_headers, [get_ttest_equal_var(group1_data, group2_data) + ("equal",)])))
else:
calculations.append(("total_task_times_persubject_ttest_test", ASADataSet(ttest_headers, [get_ttest_diff_var(group1_data, group2_data) + ("diff",)])))
#### activity tests
# [group, user, wiki_view, wiki_edit, search, asa_artifact_view, asa_artifact_edit, asa_index, asa_search]
groups_data = self._get_groups_in_seconds(activity_times_array, columns=(2,None))
group1_data = groups_data[0]
group2_data = groups_data[1]
intermediate_calcs = {
"activity_times_sstats": ASADataSet(sstats_headers[:1] + ['activity'] + sstats_headers[1:], []),
"activity_times_shapiro_test": ASADataSet(shapiro_headers, []),
"activity_times_mww_test": ASADataSet(['activity'] + mww_headers, []),
"activity_times_ttest_test": ASADataSet(['activity'] + ttest_headers, []),
"activity_times_levene_test": ASADataSet(['activity'] + levene_headers, []),
}
for measurement_id, measurement in Measurements.to_ids_list_that_matter():
intermediate_calcs["activity_times_sstats"].data.extend(
[
(self.groups[0].name, measurement) + get_simple_stats(group1_data[measurement_id]),
(self.groups[1].name, measurement) + get_simple_stats(group2_data[measurement_id])
]
)
import warnings
with warnings.catch_warnings(record=True) as w: # catch warnings
intermediate_calcs["activity_times_shapiro_test"].data.append(
(self.groups[0].name, measurement) + get_shapiro(group1_data[measurement_id])
)
if len(w) > 0:
print '\x1b[31m' + "\n... Warning running shapiro-wilk on '{0}' for group '{1}': {2}".format(measurement, self.groups[0].name, w[-1].message) + '\033[0m'
with warnings.catch_warnings(record=True) as w: # catch warnings
intermediate_calcs["activity_times_shapiro_test"].data.append(
(self.groups[1].name, measurement) + get_shapiro(group2_data[measurement_id])
)
if len(w) > 0:
print '\x1b[31m' + "\n... Warning running shapiro-wilk on '{0}' for group '{1}': {2}".format(measurement, self.groups[1].name, w[-1].message) + '\033[0m'
try:
intermediate_calcs["activity_times_mww_test"].data.append(
(measurement,) + get_mww(group1_data[measurement_id], group2_data[measurement_id])
)
except ValueError:
# get_mww() returns a ValueError when the values on both groups are the same
print "MWW raised a ValueError. Values on both groups are the same?"
intermediate_calcs["activity_times_mww_test"].data.append((measurement, None, None))
activities_levene_result = get_levene(group1_data[measurement_id], group2_data[measurement_id])
intermediate_calcs["activity_times_levene_test"].data.append(
(measurement,) + activities_levene_result
)
if activities_levene_result[0] > 0.05: # equal variance
intermediate_calcs["activity_times_ttest_test"].data.append(
(measurement,) + get_ttest_equal_var(group1_data[measurement_id], group2_data[measurement_id]) + ("equal",)
)
else:
intermediate_calcs["activity_times_ttest_test"].data.append(
(measurement,) + get_ttest_diff_var(group1_data[measurement_id], group2_data[measurement_id]) + ("diff",)
)
measurement_id += 1
for icalc_tpl in intermediate_calcs.iteritems():
calculations.append(icalc_tpl)
#### activity times by issue tests
intermediate_calcs = {
"issues_activity_times": ASADataSet(['group', 'user', 'duration_i2', 'duration_i6'], []),
"issues_activity_times_sstats": ASADataSet(sstats_headers[:1] + ['issue'] + sstats_headers[1:], []),
"issues_activity_times_mww_test": ASADataSet(['issue'] + mww_headers, []),
"issues_activity_times_levene_test": ASADataSet(['issue'] + levene_headers, []),
"issues_activity_times_ttest_test": ASADataSet(['issue'] + ttest_headers, [])
}
issues_activity_times = np.array([np.concatenate((row[0:2], [sum(row[[2,3,5,6]], timedelta())], [sum(row[[4,7,8]], timedelta())])) for row in activity_times_array]).tolist()
intermediate_calcs["issues_activity_times"].data.extend(issues_activity_times)
groups_data = self._get_groups_in_seconds(np.array(issues_activity_times), (2, None))
for idx, name in [(0, "understanding"), (1, "finding")]:
group1_data = groups_data[0][idx]
group2_data = groups_data[1][idx]
intermediate_calcs["issues_activity_times_sstats"].data.extend(
[(self.groups[0].name, name) + get_simple_stats(group1_data),
(self.groups[1].name, name) + get_simple_stats(group2_data)]
)
intermediate_calcs["issues_activity_times_mww_test"].data.extend(
[(name,) + get_mww(group1_data, group2_data)]
)
issues_levene_result = get_levene(group1_data, group2_data)
intermediate_calcs["issues_activity_times_levene_test"].data.extend(
[(name,) + issues_levene_result]
)
if issues_levene_result[0] > 0.05: # equal variance
intermediate_calcs["issues_activity_times_ttest_test"].data.extend(
[(name,) + get_ttest_equal_var(group1_data, group2_data) + ("equal",)]
)
else:
intermediate_calcs["issues_activity_times_ttest_test"].data.extend(
[(name,) + get_ttest_equal_var(group1_data, group2_data) + ("diff",)]
)
for icalc_tpl in intermediate_calcs.iteritems():
calculations.append(icalc_tpl)
#### totals activity times tests
total_activity_times = np.array([np.concatenate((row[0:2], [sum(row[2:], timedelta())])) for row in activity_times_array]).tolist()
calculations.append(("total_activity_times", ASADataSet(['group', 'user', 'duration'], total_activity_times)))
groups_data = self._get_groups_in_seconds(np.array(total_activity_times), (2, None))
group1_data = groups_data[0][0]
group2_data = groups_data[1][0]
calculations.append(("total_activity_times_sstats", ASADataSet(sstats_headers,
[(self.groups[0].name,) + get_simple_stats(group1_data), (self.groups[1].name,) + get_simple_stats(group2_data)])))
calculations.append(("total_activity_times_mww_test", ASADataSet(mww_headers, [get_mww(group1_data, group2_data)])))
total_levene_result = get_levene(group1_data, group2_data)
calculations.append(("total_activity_times_levene_test", ASADataSet(levene_headers, [total_levene_result])))
if total_levene_result[0] > 0.05: # equal variance
calculations.append(("total_activity_times_ttest_test", ASADataSet(ttest_headers, [get_ttest_equal_var(group1_data, group2_data) + ("equal",)])))
else:
calculations.append(("total_activity_times_ttest_test", ASADataSet(ttest_headers, [get_ttest_diff_var(group1_data, group2_data) + ("diff",)])))
# questionnaires
questionnaire_questions = ASADataSet(['question_number', 'question'], self.get_questionnaire_questions())
questionnaire_hypothesis = ASADataSet(['question_number', 'hypothesis'], self.get_questionnaire_hypothesis())
questionnaire_histogram = ASADataSet(['group', 'question', '1', '2', '3', '4', '5'], [])
questionnaire_one_answer_per_row = ASADataSet(['group', 'user', 'question', 'answer'], [])
questionnaire_one_answer_per_column = ASADataSet([], [])
questionnaire_sstats = ASADataSet(['question'] + sstats_headers, [])
questionnaire_mww_results = ASADataSet(['question'] + mww_headers, [])
questionnaire_ttest_results = ASADataSet(['question'] + ttest_headers, [])
questionnaire_levene_results = ASADataSet(['question'] + levene_headers, [])
def get_question_and_answers(questionnaire_row):
question = questionnaire_row[0]
answers = questionnaire_row[1:-6]
if type(answers[0]) is float: # discard questions with a non-numeric answer
answers = [int(answer) if type(answer) is float else answer for answer in answers] # floats become ints
answers_noned = [a if not a == "" else None for a in answers] # replace missing data values with None
answers = [a for a in answers if not a == ""] # discard missing data values
return question, answers, answers_noned
return question, None, None
group1_name = self.groups[0].name
group2_name = self.groups[1].name
group1_subjects = self.groups[0].get_questionnaire_subjects()
group1_data = self.groups[0].get_questionnaire_questions_and_answers()
group2_subjects = self.groups[1].get_questionnaire_subjects()
group2_data = self.groups[1].get_questionnaire_questions_and_answers()
questionnaire_one_answer_per_column.headers = ['question'] + group1_subjects + group2_subjects
if not group1_data is None:
for i in range(len(group1_data)): # for each question
question_g1, answers_g1, answers_g1_noned = get_question_and_answers(group1_data[i])
question_g2, answers_g2, answers_g2_noned = get_question_and_answers(group2_data[i])
assert question_g1 == question_g2
if answers_g1 is None or answers_g2 is None:
continue
for i in range(len(group1_subjects)):
if not answers_g1_noned[i] is None:
questionnaire_one_answer_per_row.data.append((group1_name, group1_subjects[i], question_g1, answers_g1_noned[i]))
for i in range(len(group2_subjects)):
if not answers_g2_noned[i] is None:
questionnaire_one_answer_per_row.data.append((group2_name, group2_subjects[i], question_g2, answers_g2_noned[i]))
questionnaire_one_answer_per_column.data.append((question_g1,) + tuple(answers_g1_noned + answers_g2_noned))
questionnaire_histogram.data.append((group1_name, question_g1) + tuple(np.bincount(np.array(answers_g1), minlength=6)[1:]))
questionnaire_histogram.data.append((group2_name, question_g2) + tuple(np.bincount(np.array(answers_g2), minlength=6)[1:]))
questionnaire_sstats.data.append((question_g1, group1_name) + get_simple_stats(answers_g1))
questionnaire_sstats.data.append((question_g2, group2_name) + get_simple_stats(answers_g2))
questionnaire_mww_results.data.append((question_g1,) + get_mww(answers_g1, answers_g2))
quest_levene_result = get_levene(answers_g1, answers_g2)
questionnaire_levene_results.data.append((question_g1,) + quest_levene_result)
if quest_levene_result[0] > 0.05: # equal variance
questionnaire_ttest_results.data.append((question_g1,) + get_ttest_equal_var(answers_g1, answers_g2) + ("equal",))
else:
questionnaire_ttest_results.data.append((question_g1,) + get_ttest_diff_var(answers_g1, answers_g2) + ("diff",))
calculations.append(("questionnaire_questions", questionnaire_questions))
calculations.append(("questionnaire_hypothesis", questionnaire_hypothesis))
calculations.append(("questionnaire_histogram", questionnaire_histogram))
calculations.append(("questionnaire_one_answer_per_row", questionnaire_one_answer_per_row))
calculations.append(("questionnaire_one_answer_per_column", questionnaire_one_answer_per_column))
calculations.append(("questionnaire_sstats", questionnaire_sstats))
calculations.append(("questionnaire_mww_test", questionnaire_mww_results))
calculations.append(("questionnaire_levene_test", questionnaire_levene_results))
calculations.append(("questionnaire_ttest_test", questionnaire_ttest_results))
stdout.write(" [finished]\n")
stdout.flush()
return ASAExperimentCalculations(self, calculations)
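# --- Editor's note (illustrative, not part of asaanalyser) --------------------
# run_tests repeatedly applies the same decision rule: Levene's test for equal
# variances picks between the equal-variance and Welch t-tests, with the
# Mann-Whitney U test reported alongside.  A minimal standalone sketch of that
# rule using scipy.stats directly (an assumption: the project's stats helpers
# are presumed to wrap something equivalent):
def _compare_groups_sketch(group1, group2, alpha=0.05):
    from scipy import stats
    _, levene_p = stats.levene(group1, group2)
    equal_var = levene_p > alpha
    t_stat, t_p = stats.ttest_ind(group1, group2, equal_var=equal_var)
    u_stat, u_p = stats.mannwhitneyu(group1, group2)
    return {"equal_var": equal_var, "t": (t_stat, t_p), "u": (u_stat, u_p)}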
class ASAExperimentGroup(object):
def __init__(self, name, db_dataset, xls_data, date, questionnaire_data):
"""
Combines the data from the database with that obtained from
the xls file, to create an instance of ASAExperimentGroup
"""
self.name = name
self.users = set()
self.tasks = set()
self.timespans = {} # {(user,task): (start_time,end_time)}
self.task_durations = [] # (user, task, duration)
self.activity_times = [] # (user, activity_type, duration)
self.transposed_activity_times = [] # (user, activity1, activity2, activity3, ...)
self.db_dataset = db_dataset # [[id, resource_type, resource_id, operation, username, time_started, time_ended]]
self.questionnaire_data = questionnaire_data
# process XLS data
group_end_time = max([row[3] for row in xls_data])
for row in xls_data:
self.users.add(row[0])
self.tasks.add(row[1])
assert(not (row[0], row[1]) in self.timespans) # finding duplicate tasks for the same user means something went wrong...
if row[4] in ("yes", "partial"): # only account for completed tasks
self.timespans[(row[0], row[1])] = (
"%s %s:00.0" % (date, row[2]),
"%s %s:00.0" % (date, row[3])
)
self.task_durations.append((row[0], row[1],
parse_date("%s %s:00.0" % (date, row[3])) -
parse_date("%s %s:00.0" % (date, row[2]))
))
else:
if not CONSIDER_ONLY_COMPLETED_TASKS: # make uncompleted tasks take up the rest of the available time
if not row[2] == '':
self.timespans[(row[0], row[1])] = (
"%s %s:00.0" % (date, row[2]),
"%s %s:00.0" % (date, group_end_time)
)
self.task_durations.append((row[0], row[1],
parse_date("%s %s:00.0" % (date, group_end_time)) -
parse_date("%s %s:00.0" % (date, row[2]))
))
# Process DB data (needs refactoring)
stats_wiki = self.db_dataset.filter_by_resource_type(ResourceTypes.wiki)
stats_wiki_view = stats_wiki.filter_by_operation("view").aggregate_timedeltas((1,3,4))
stats_wiki_edit = stats_wiki.filter_by_operation("edit").aggregate_timedeltas((1,3,4))
stats_search = self.db_dataset.filter_by_resource_type(ResourceTypes.search).aggregate_timedeltas((1,3,4))
stats_asa_artifact = self.db_dataset.filter_by_resource_type(ResourceTypes.asa_artifact)
stats_asa_artifact_view = stats_asa_artifact.filter_by_operation("view").aggregate_timedeltas((1,3,4))
stats_asa_artifact_edit = stats_asa_artifact.filter_by_operation("edit").aggregate_timedeltas((1,3,4))
stats_asa_index = self.db_dataset.filter_by_resource_type(ResourceTypes.asa_index).aggregate_timedeltas((1,3,4))
stats_asa_search = self.db_dataset.filter_by_resource_type(ResourceTypes.asa_search).aggregate_timedeltas((1,3,4))
activity_times = []
for collection, value_type in [
(stats_wiki_view, Measurements.wiki_view),
(stats_wiki_edit, Measurements.wiki_edit),
(stats_search, Measurements.search),
(stats_asa_artifact_view, Measurements.asa_artifact_view),
(stats_asa_artifact_edit, Measurements.asa_artifact_edit),
(stats_asa_index, Measurements.asa_index),
(stats_asa_search, Measurements.asa_search)]:
activity_times.extend(collection.delete_columns((0,1)).insert_column(1, "activity", value_type).data)
self.activity_times.extend(activity_times)
self.transposed_activity_times.extend(self._transpose_activity_times(activity_times))
def _transpose_activity_times(self, activity_times):
def get_duration_for_user_and_activity(user, activity_type):
for row in activity_times:
if row[0] == user and row[1] == activity_type:
return row[2]
return timedelta(0)
blanked_and_ordered_activity_times = []
import numpy as np
for user in set(np.array(activity_times)[:,0]): # unique users
for activity_type in Measurements.to_list():
blanked_and_ordered_activity_times.append([user, activity_type, get_duration_for_user_and_activity(user, activity_type)])
transposed_activity_times = [[user] + np.array(list(row)).transpose().tolist()[2:][0] for user,row in groupby(blanked_and_ordered_activity_times, lambda x: x[0])]
return transposed_activity_times
def get_times_for_user_and_task(self, username, taskname):
if not (username, taskname) in self.timespans:
return None
# translate taskname to a start and end time
start_time, end_time = self.timespans[(username, taskname)]
by_user_and_date = self.db_dataset.filter_by_username(username).filter_by_date_interval(start_time, end_time)
stats_wiki = by_user_and_date.filter_by_resource_type(ResourceTypes.wiki)
stats_wiki_view = stats_wiki.filter_by_operation("view").aggregate_timedeltas((1,3,4))
stats_wiki_edit = stats_wiki.filter_by_operation("edit").aggregate_timedeltas((1,3,4))
stats_search = by_user_and_date.filter_by_resource_type(ResourceTypes.search).aggregate_timedeltas((1,3,4))
stats_asa_artifact = by_user_and_date.filter_by_resource_type(ResourceTypes.asa_artifact)
stats_asa_artifact_view = stats_asa_artifact.filter_by_operation("view").aggregate_timedeltas((1,3,4))
stats_asa_artifact_edit = stats_asa_artifact.filter_by_operation("edit").aggregate_timedeltas((1,3,4))
stats_asa_index = by_user_and_date.filter_by_resource_type(ResourceTypes.asa_index).aggregate_timedeltas((1,3,4))
stats_asa_search = by_user_and_date.filter_by_resource_type(ResourceTypes.asa_search).aggregate_timedeltas((1,3,4))
assert(len(stats_wiki_view.data) <= 1)
assert(len(stats_wiki_edit.data) <= 1)
assert(len(stats_search.data) <= 1)
assert(len(stats_asa_artifact_view.data) <= 1)
assert(len(stats_asa_artifact_edit.data) <= 1)
assert(len(stats_asa_index.data) <= 1)
assert(len(stats_asa_search.data)<= 1)
def ensure_not_empty(asa_dataset):
return asa_dataset.data[0][3] if len(asa_dataset.data) == 1 else None
return [
(Measurements.wiki_view, ensure_not_empty(stats_wiki_view)),
(Measurements.wiki_edit, ensure_not_empty(stats_wiki_edit)),
(Measurements.search, ensure_not_empty(stats_search)),
(Measurements.asa_artifact_view, ensure_not_empty(stats_asa_artifact_view)),
(Measurements.asa_artifact_edit, ensure_not_empty(stats_asa_artifact_edit)),
(Measurements.asa_index, ensure_not_empty(stats_asa_index)),
(Measurements.asa_search, ensure_not_empty(stats_asa_search))
]
def get_times_for_all_users_and_tasks(self):
#replaces Nones with 0:00:00
return [tuple(value if not value is None else timedelta(0) for value in row) for row in self._get_times_for_all_users_and_tasks()]
def _get_times_for_all_users_and_tasks(self):
all_times = []
for user in self.users:
for task in self.tasks:
stats = self.get_times_for_user_and_task(user, task)
if not stats is None:
all_times.append((user, task) + tuple(v for k,v in stats))
return all_times
def get_task_times(self):
return self.task_durations
def get_activity_times(self):
return self.transposed_activity_times
def _sum_times(self, times):
non_nones = self._non_nones(times)
return sum(non_nones, timedelta()) if len(non_nones) > 0 else None
def _count_times(self, times):
return len(self._non_nones(times))
def _non_nones(self, times):
return [t for t in times if not t is None]
def _calc_sum_avg_std(self, values):
def timedelta_avg(vals):
return self._sum_times(vals)/self._count_times(vals)
if self._count_times(values) == 0:
return (None, None, None)
total = self._sum_times(values)
avg = timedelta_avg(values)
variance = map(lambda x: timedelta(seconds=math.pow(total_seconds(x - avg),2)), self._non_nones(values))
std = timedelta(seconds=math.sqrt(total_seconds(timedelta_avg(variance))))
return (total, avg, std)
def get_questionnaire_subjects(self):
if self.questionnaire_data is None: return None
if len(self.questionnaire_data)==0: return []
return self.questionnaire_data[1][0][1:-6]
def get_questionnaire_questions_and_answers(self):
if self.questionnaire_data is None: return None
if len(self.questionnaire_data)==0: return []
return self.questionnaire_data[1][1:]
class ASAExperimentCalculations(object):
def __init__(self, experiment, calculations):
"""
:param experiment: The experiment that the calculations refer to
:param calculations: List of tuples with the results of all the executed calculations. These results were calculated in the same order in which they are stored, with some of them being based on previous ones.
"""
self.experiment = experiment
self.calculations = calculations
def as_tuples_list(self):
"""
Exposes the internal calculations data
"""
return self.calculations
def as_dict(self):
return dict(self.calculations)
def get_calculation(self, calc_type, sort_key=None):
import numpy as np
if sort_key is None:
return np.array(dict(self.calculations)[calc_type].data)
else:
return np.array(sorted(dict(self.calculations)[calc_type].data, key=sort_key)) #,dtype=('a2,a20,f8,f8,f8,f8,f8,f8')
class ASADataSet(object):
def __init__(self, headers, iterable_data):
"""
headers: a list of headers for the data
iterable_data: an interable of tuples
"""
self.headers = headers
self.data = iterable_data
@staticmethod
def sum_timedeltas(data):
summed_deltas = timedelta()
for row in data:
summed_deltas += parse_date(row[6]) - parse_date(row[5])
return summed_deltas
def delete_columns(self, column_range):
def delete_from_list(data, column_range):
return [row[0:column_range[0]] + row[column_range[1]+1:] for row in data]
return ASADataSet(delete_from_list([self.headers], column_range)[0], delete_from_list(self.data, column_range))
def insert_column(self, index, column_name, default_value):
def insert_in_list(data, index, default_value):
return [row[0:index] + (default_value,) + row[index:] for row in data]
return ASADataSet(insert_in_list([tuple(self.headers)], index, column_name)[0], insert_in_list(self.data, index, default_value))
class ASADBDataSet(ASADataSet):
def __init__(self, iterable_data):
super(ASADBDataSet, self).__init__(
['id', 'resource_type', 'resource_id', 'operation', 'username', 'time_started', 'time_ended'],
iterable_data
)
def aggregate_timedeltas(self, col_ids, aggr_func=None):
"""
col_ids is the list of column indices that should be aggregated. The aggregation function
can be specified, but is otherwise sum(), and always acts over the time columns. Please
note that index numbers follow this order:
id, resource_type, resource_id, operation, username, time_started, time_ended
"""
if aggr_func is None: aggr_func = ASADataSet.sum_timedeltas
def set_keys(*indices):
"""Returns a function that returns a tuple of key values"""
def get_keys(seq, indices=indices):
keys = []
for i in indices:
keys.append(seq[i])
return tuple(keys)
return get_keys
keyfunc = set_keys(*col_ids)
aggregated = []
for k,v in groupby(self.data, key=keyfunc):
aggregated.append(tuple(list(k) + [aggr_func(v)]))
return ASADataSet(
            ['resource_type', 'operation', 'username', 'duration'],
aggregated)
def filter_by_username(self, username):
if not type(username) in (list, set):
username = [username]
return ASADBDataSet([row for row in self.data if row[4] in username])
def filter_by_operation(self, operation):
return ASADBDataSet([row for row in self.data if row[3] == operation])
def filter_by_resource_type(self, resource_type):
return ASADBDataSet([row for row in self.data if row[1] == resource_type])
def filter_by_date_interval(self, start_time, end_time):
return ASADBDataSet([row for row in self.data if parse_date(start_time) <= parse_date(row[5]) < parse_date(end_time)]) # and parse_date(start_time) < parse_date(row[6]) < parse_date(end_time)
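# --- Editor's note (illustrative, not part of asaanalyser) --------------------
# aggregate_timedeltas groups analytics rows by the chosen key columns and sums
# the (time_ended - time_started) spans per group.  A standalone sketch of the
# same pattern using plain datetimes and itertools rather than the project's
# util helpers:
def _aggregate_spans_sketch(rows, key_indices):
    # rows: sequences whose last two items are datetime start/end values
    from itertools import groupby as _groupby
    from datetime import timedelta
    keyfunc = lambda row: tuple(row[i] for i in key_indices)
    aggregated = []
    for key, group in _groupby(sorted(rows, key=keyfunc), key=keyfunc):
        total = sum((row[-1] - row[-2] for row in group), timedelta())
        aggregated.append(key + (total,))
    return aggregated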
|
mit
|
joopert/home-assistant
|
homeassistant/components/kira/__init__.py
|
19
|
4242
|
"""KIRA interface to receive UDP packets from an IR-IP bridge."""
import logging
import os
import pykira
import voluptuous as vol
from voluptuous.error import Error as VoluptuousError
import yaml
from homeassistant.const import (
CONF_CODE,
CONF_DEVICE,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SENSORS,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
STATE_UNKNOWN,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
DOMAIN = "kira"
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "0.0.0.0"
DEFAULT_PORT = 65432
CONF_REPEAT = "repeat"
CONF_REMOTES = "remotes"
CONF_SENSOR = "sensor"
CONF_REMOTE = "remote"
CODES_YAML = f"{DOMAIN}_codes.yaml"
CODE_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_CODE): cv.string,
vol.Optional(CONF_TYPE): cv.string,
vol.Optional(CONF_DEVICE): cv.string,
vol.Optional(CONF_REPEAT): cv.positive_int,
}
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DOMAIN): vol.Exclusive(cv.string, "sensors"),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
REMOTE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DOMAIN): vol.Exclusive(cv.string, "remotes"),
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_SENSORS): [SENSOR_SCHEMA],
vol.Optional(CONF_REMOTES): [REMOTE_SCHEMA],
}
)
},
extra=vol.ALLOW_EXTRA,
)
def load_codes(path):
"""Load KIRA codes from specified file."""
codes = []
if os.path.exists(path):
with open(path) as code_file:
data = yaml.safe_load(code_file) or []
for code in data:
try:
codes.append(CODE_SCHEMA(code))
except VoluptuousError as exception:
# keep going
_LOGGER.warning("KIRA code invalid data: %s", exception)
else:
with open(path, "w") as code_file:
code_file.write("")
return codes
def setup(hass, config):
"""Set up the KIRA component."""
sensors = config.get(DOMAIN, {}).get(CONF_SENSORS, [])
remotes = config.get(DOMAIN, {}).get(CONF_REMOTES, [])
# If no sensors or remotes were specified, add a sensor
if not (sensors or remotes):
sensors.append({})
codes = load_codes(hass.config.path(CODES_YAML))
hass.data[DOMAIN] = {CONF_SENSOR: {}, CONF_REMOTE: {}}
def load_module(platform, idx, module_conf):
"""Set up the KIRA module and load platform."""
# note: module_name is not the HA device name. it's just a unique name
# to ensure the component and platform can share information
module_name = ("%s_%d" % (DOMAIN, idx)) if idx else DOMAIN
device_name = module_conf.get(CONF_NAME, DOMAIN)
port = module_conf.get(CONF_PORT, DEFAULT_PORT)
host = module_conf.get(CONF_HOST, DEFAULT_HOST)
if platform == CONF_SENSOR:
module = pykira.KiraReceiver(host, port)
module.start()
else:
module = pykira.KiraModule(host, port)
hass.data[DOMAIN][platform][module_name] = module
for code in codes:
code_tuple = (code.get(CONF_NAME), code.get(CONF_DEVICE, STATE_UNKNOWN))
module.registerCode(code_tuple, code.get(CONF_CODE))
discovery.load_platform(
hass, platform, DOMAIN, {"name": module_name, "device": device_name}, config
)
for idx, module_conf in enumerate(sensors):
load_module(CONF_SENSOR, idx, module_conf)
for idx, module_conf in enumerate(remotes):
load_module(CONF_REMOTE, idx, module_conf)
def _stop_kira(_event):
"""Stop the KIRA receiver."""
for receiver in hass.data[DOMAIN][CONF_SENSOR].values():
receiver.stop()
_LOGGER.info("Terminated receivers")
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_kira)
return True
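# --- Editor's note (illustrative, not part of the Home Assistant component) ---
# load_codes above reads kira_codes.yaml and validates each entry against
# CODE_SCHEMA, so a codes file is a YAML list of mappings with the keys seen in
# that schema.  A minimal sketch of such an entry (all field values are made up
# for illustration):
#
#   - name: power_on
#     code: "K 2322 228B 1122"   # hypothetical KIRA code string
#     type: media_player
#     device: living_room_tv
#     repeat: 2
#
# Parsing such text with the same machinery the component uses:
def _parse_codes_sketch(yaml_text):
    return [CODE_SCHEMA(entry) for entry in yaml.safe_load(yaml_text) or []]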
|
apache-2.0
|
savoirfairelinux/ring-daemon
|
doc/dbus-api/tools/xincludator.py
|
30
|
1291
|
#!/usr/bin/python
from sys import argv, stdout, stderr
import codecs, locale
import os
import xml.dom.minidom
stdout = codecs.getwriter('utf-8')(stdout)
NS_XI = 'http://www.w3.org/2001/XInclude'
def xincludate(dom, base, dropns = []):
remove_attrs = []
for i in xrange(dom.documentElement.attributes.length):
attr = dom.documentElement.attributes.item(i)
if attr.prefix == 'xmlns':
if attr.localName in dropns:
remove_attrs.append(attr)
else:
dropns.append(attr.localName)
for attr in remove_attrs:
dom.documentElement.removeAttributeNode(attr)
for include in dom.getElementsByTagNameNS(NS_XI, 'include'):
href = include.getAttribute('href')
# FIXME: assumes Unixy paths
filename = os.path.join(os.path.dirname(base), href)
subdom = xml.dom.minidom.parse(filename)
xincludate(subdom, filename, dropns)
if './' in href:
subdom.documentElement.setAttribute('xml:base', href)
include.parentNode.replaceChild(subdom.documentElement, include)
if __name__ == '__main__':
argv = argv[1:]
dom = xml.dom.minidom.parse(argv[0])
xincludate(dom, argv[0])
xml = dom.toxml()
stdout.write(xml)
stdout.write('\n')
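# --- Editor's note (illustrative usage, not part of the original script) ------
# xincludate() replaces every <xi:include href="..."/> element with the parsed
# document it points to, resolving href relative to the including file.  A
# self-contained sketch (file names below are made up for illustration):
def _xinclude_demo(tmpdir):
    # Write a parent document that pulls in a child document via xi:include,
    # then expand it in place with xincludate().
    import xml.dom.minidom as minidom
    child_path = os.path.join(tmpdir, 'child.xml')
    parent_path = os.path.join(tmpdir, 'parent.xml')
    open(child_path, 'w').write('<child>hello</child>')
    open(parent_path, 'w').write(
        '<root xmlns:xi="http://www.w3.org/2001/XInclude">'
        '<xi:include href="child.xml"/></root>')
    dom = minidom.parse(parent_path)
    xincludate(dom, parent_path)
    return dom.toxml()  # the <child> element is now inlined inside <root>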
|
gpl-3.0
|
ZachMassia/platformio
|
platformio/builder/scripts/nxplpc.py
|
3
|
1885
|
# Copyright 2014-2016 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Builder for NXP LPC series ARM microcontrollers.
"""
from os.path import join
from shutil import copyfile
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Default,
DefaultEnvironment, SConscript)
def UploadToDisk(target, source, env): # pylint: disable=W0613,W0621
env.AutodetectUploadPort()
copyfile(join(env.subst("$BUILD_DIR"), "firmware.bin"),
join(env.subst("$UPLOAD_PORT"), "firmware.bin"))
print("Firmware has been successfully uploaded.\n"
"Please restart your board.")
env = DefaultEnvironment()
SConscript(env.subst(join("$PIOBUILDER_DIR", "scripts", "basearm.py")))
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildProgram()
#
# Target: Build the .bin file
#
if "uploadlazy" in COMMAND_LINE_TARGETS:
target_firm = join("$BUILD_DIR", "firmware.bin")
else:
target_firm = env.ElfToBin(join("$BUILD_DIR", "firmware"), target_elf)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD")
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
upload = env.Alias(["upload", "uploadlazy"], target_firm, UploadToDisk)
AlwaysBuild(upload)
#
# Target: Define targets
#
Default([target_firm, target_size])
|
apache-2.0
|
jmcorgan/gnuradio
|
gr-digital/python/digital/qa_ofdm_sync_sc_cfb.py
|
55
|
7134
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import random
from gnuradio import gr, gr_unittest, blocks, analog, channels
from gnuradio import digital
from gnuradio.digital.utils import tagged_streams
from gnuradio.digital.ofdm_txrx import ofdm_tx
class qa_ofdm_sync_sc_cfb (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_detect (self):
""" Send two bursts, with zeros in between, and check
they are both detected at the correct position and no
false alarms occur """
n_zeros = 15
fft_len = 32
cp_len = 4
sig_len = (fft_len + cp_len) * 10
sync_symbol = [(random.randint(0, 1)*2)-1 for x in range(fft_len/2)] * 2
tx_signal = [0,] * n_zeros + \
sync_symbol[-cp_len:] + \
sync_symbol + \
[(random.randint(0, 1)*2)-1 for x in range(sig_len)]
tx_signal = tx_signal * 2
add = blocks.add_cc()
sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len)
sink_freq = blocks.vector_sink_f()
sink_detect = blocks.vector_sink_b()
self.tb.connect(blocks.vector_source_c(tx_signal), (add, 0))
self.tb.connect(analog.noise_source_c(analog.GR_GAUSSIAN, .01), (add, 1))
self.tb.connect(add, sync)
self.tb.connect((sync, 0), sink_freq)
self.tb.connect((sync, 1), sink_detect)
self.tb.run()
sig1_detect = sink_detect.data()[0:len(tx_signal)/2]
sig2_detect = sink_detect.data()[len(tx_signal)/2:]
self.assertTrue(abs(sig1_detect.index(1) - (n_zeros + fft_len + cp_len)) < cp_len)
self.assertTrue(abs(sig2_detect.index(1) - (n_zeros + fft_len + cp_len)) < cp_len)
self.assertEqual(numpy.sum(sig1_detect), 1)
self.assertEqual(numpy.sum(sig2_detect), 1)
def test_002_freq (self):
""" Add a fine frequency offset and see if that get's detected properly """
fft_len = 32
cp_len = 4
# This frequency offset is normalized to rads, i.e. \pi == f_s/2
max_freq_offset = 2*numpy.pi/fft_len # Otherwise, it's coarse
freq_offset = ((2 * random.random()) - 1) * max_freq_offset
sig_len = (fft_len + cp_len) * 10
sync_symbol = [(random.randint(0, 1)*2)-1 for x in range(fft_len/2)] * 2
tx_signal = sync_symbol[-cp_len:] + \
sync_symbol + \
[(random.randint(0, 1)*2)-1 for x in range(sig_len)]
sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len, True)
sink_freq = blocks.vector_sink_f()
sink_detect = blocks.vector_sink_b()
channel = channels.channel_model(0.005, freq_offset / 2.0 / numpy.pi)
self.tb.connect(blocks.vector_source_c(tx_signal), channel, sync)
self.tb.connect((sync, 0), sink_freq)
self.tb.connect((sync, 1), sink_detect)
self.tb.run()
phi_hat = sink_freq.data()[sink_detect.data().index(1)]
est_freq_offset = 2 * phi_hat / fft_len
self.assertAlmostEqual(est_freq_offset, freq_offset, places=2)
def test_003_multiburst (self):
""" Send several bursts, see if the number of detects is correct.
Burst lengths and content are random.
"""
n_bursts = 42
fft_len = 32
cp_len = 4
tx_signal = []
for i in xrange(n_bursts):
sync_symbol = [(random.randint(0, 1)*2)-1 for x in range(fft_len/2)] * 2
tx_signal += [0,] * random.randint(0, 2*fft_len) + \
sync_symbol[-cp_len:] + \
sync_symbol + \
[(random.randint(0, 1)*2)-1 for x in range(fft_len * random.randint(5,23))]
add = blocks.add_cc()
sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len)
sink_freq = blocks.vector_sink_f()
sink_detect = blocks.vector_sink_b()
channel = channels.channel_model(0.005)
self.tb.connect(blocks.vector_source_c(tx_signal), channel, sync)
self.tb.connect((sync, 0), sink_freq)
self.tb.connect((sync, 1), sink_detect)
self.tb.run()
n_bursts_detected = numpy.sum(sink_detect.data())
# We allow for one false alarm or missed burst
self.assertTrue(abs(n_bursts_detected - n_bursts) <= 1,
msg="""Because of statistics, it is possible (though unlikely)
that the number of detected bursts differs slightly. If the number of detects is
off by one or two, run the test again and see what happens.
Detection error was: %d """ % (numpy.sum(sink_detect.data()) - n_bursts)
)
def test_004_ofdm_packets (self):
"""
Send several bursts using ofdm_tx, see if the number of detects is correct.
Burst lengths and content are random.
"""
n_bursts = 42
fft_len = 64
cp_len = 16
# Here, coarse freq offset is allowed
max_freq_offset = 2*numpy.pi/fft_len * 4
freq_offset = ((2 * random.random()) - 1) * max_freq_offset
tx_signal = []
packets = []
tagname = "packet_length"
min_packet_length = 10
max_packet_length = 50
sync_sequence = [random.randint(0, 1)*2-1 for x in range(fft_len/2)]
for i in xrange(n_bursts):
packet_length = random.randint(min_packet_length,
max_packet_length+1)
packet = [random.randint(0, 255) for i in range(packet_length)]
packets.append(packet)
data, tags = tagged_streams.packets_to_vectors(packets, tagname, vlen=1)
total_length = len(data)
src = blocks.vector_source_b(data, False, 1, tags)
mod = ofdm_tx(packet_length_tag_key=tagname)
sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len)
sink_freq = blocks.vector_sink_f()
sink_detect = blocks.vector_sink_b()
noise_level = 0.005
channel = channels.channel_model(noise_level, freq_offset / 2 / numpy.pi)
self.tb.connect(src, mod, channel, sync, sink_freq)
self.tb.connect((sync, 1), sink_detect)
self.tb.run()
self.assertEqual(numpy.sum(sink_detect.data()), n_bursts)
if __name__ == '__main__':
gr_unittest.run(qa_ofdm_sync_sc_cfb, "qa_ofdm_sync_sc_cfb.xml")
|
gpl-3.0
|
madjelan/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
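# G is the Gram matrix X.T.dot(X) and Xy is X.T.dot(y); they feed the
# precompute/Gram code paths (orthogonal_mp_gram) exercised below.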
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
axelspringer/ansible-modules-core
|
cloud/rackspace/rax_files_objects.py
|
41
|
18551
|
#!/usr/bin/python
# (c) 2013, Paul Durivage <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
default: null
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
default: null
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
default: null
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
choices:
- yes
- "no"
default: "yes"
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects: container=testcont dest=~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects: container=testcont method=delete dest=file1
- name: "Delete several objects in test container"
rax_files_objects: container=testcont method=delete dest=file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects: container=testcont method=delete
- name: "Upload all files to test container"
rax_files_objects: container=testcont method=put src=~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: [email protected]
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60
- name: "Attempt to get remote object that does not exist"
rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects: container=testcont type=meta dest=file2
- name: "Get metadata on several objects"
rax_files_objects: container=testcont type=meta src=file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects: container=testcont type=meta src=file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects: container=testcont type=meta
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
""" Uploads a folder to Cloud Files.
"""
total_bytes = 0
for root, dirs, files in os.walk(folder):
for fname in files:
full_path = os.path.join(root, fname)
obj_name = os.path.relpath(full_path, folder)
obj_size = os.path.getsize(full_path)
cf.upload_file(container, full_path,
obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
total_bytes += obj_size
return total_bytes
def upload(module, cf, container, src, dest, meta, expires):
""" Uploads a single object or a folder to Cloud Files Optionally sets an
metadata, TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
if not src:
module.fail_json(msg='src must be specified when uploading')
c = _get_container(module, cf, container)
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
total_bytes = 0
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
elif is_dir:
try:
total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
try:
cont_obj = c.upload_file(src, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or total_bytes > 0:
EXIT_DICT['changed'] = True
if meta:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
Optionally disable maintaining a directory structure by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
""" Delete specific objects by proving a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception as e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_changed'] = len(results)
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
""" Removes metadata keys and values specified in meta, if any. Deletes on
all objects specified by src or dest (but not both), if any; otherwise it
deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject as e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
main()
|
gpl-3.0
|
pducks32/intergrala
|
python/sympy/sympy/physics/quantum/tests/test_gate.py
|
28
|
11621
|
from sympy import exp, symbols, sqrt, I, pi, Mul, Integer, Wild
from sympy.matrices import Matrix
from sympy.physics.quantum.gate import (XGate, YGate, ZGate, random_circuit,
CNOT, IdentityGate, H, X, Y, S, T, Z, SwapGate, gate_simp, gate_sort,
CNotGate, TGate, HadamardGate, PhaseGate, UGate, CGate)
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qubit import Qubit, IntQubit, qubit_to_matrix, \
matrix_to_qubit
from sympy.physics.quantum.matrixutils import matrix_to_zero
from sympy.physics.quantum.matrixcache import sqrt2_inv
from sympy.physics.quantum import Dagger
def test_gate():
"""Test a basic gate."""
h = HadamardGate(1)
assert h.min_qubits == 2
assert h.nqubits == 1
i0 = Wild('i0')
i1 = Wild('i1')
h0_w1 = HadamardGate(i0)
h0_w2 = HadamardGate(i0)
h1_w1 = HadamardGate(i1)
assert h0_w1 == h0_w2
assert h0_w1 != h1_w1
assert h1_w1 != h0_w2
cnot_10_w1 = CNOT(i1, i0)
cnot_10_w2 = CNOT(i1, i0)
cnot_01_w1 = CNOT(i0, i1)
assert cnot_10_w1 == cnot_10_w2
assert cnot_10_w1 != cnot_01_w1
assert cnot_10_w2 != cnot_01_w1
def test_UGate():
a, b, c, d = symbols('a,b,c,d')
uMat = Matrix([[a, b], [c, d]])
# Test basic case where gate exists in 1-qubit space
u1 = UGate((0,), uMat)
assert represent(u1, nqubits=1) == uMat
assert qapply(u1*Qubit('0')) == a*Qubit('0') + c*Qubit('1')
assert qapply(u1*Qubit('1')) == b*Qubit('0') + d*Qubit('1')
# Test case where gate exists in a larger space
u2 = UGate((1,), uMat)
u2Rep = represent(u2, nqubits=2)
for i in range(4):
assert u2Rep*qubit_to_matrix(IntQubit(i, 2)) == \
qubit_to_matrix(qapply(u2*IntQubit(i, 2)))
def test_cgate():
"""Test the general CGate."""
# Test single control functionality
CNOTMatrix = Matrix(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
assert represent(CGate(1, XGate(0)), nqubits=2) == CNOTMatrix
# Test multiple control bit functionality
ToffoliGate = CGate((1, 2), XGate(0))
assert represent(ToffoliGate, nqubits=3) == \
Matrix(
[[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0,
1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]])
ToffoliGate = CGate((3, 0), XGate(1))
assert qapply(ToffoliGate*Qubit('1001')) == \
matrix_to_qubit(represent(ToffoliGate*Qubit('1001'), nqubits=4))
assert qapply(ToffoliGate*Qubit('0000')) == \
matrix_to_qubit(represent(ToffoliGate*Qubit('0000'), nqubits=4))
CYGate = CGate(1, YGate(0))
CYGate_matrix = Matrix(
((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 0, -I), (0, 0, I, 0)))
# Test 2 qubit controlled-Y gate decompose method.
assert represent(CYGate.decompose(), nqubits=2) == CYGate_matrix
CZGate = CGate(0, ZGate(1))
CZGate_matrix = Matrix(
((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, -1)))
assert qapply(CZGate*Qubit('11')) == -Qubit('11')
assert matrix_to_qubit(represent(CZGate*Qubit('11'), nqubits=2)) == \
-Qubit('11')
# Test 2 qubit controlled-Z gate decompose method.
assert represent(CZGate.decompose(), nqubits=2) == CZGate_matrix
CPhaseGate = CGate(0, PhaseGate(1))
assert qapply(CPhaseGate*Qubit('11')) == \
I*Qubit('11')
assert matrix_to_qubit(represent(CPhaseGate*Qubit('11'), nqubits=2)) == \
I*Qubit('11')
# Test that the dagger, inverse, and power of CGate is evaluated properly
assert Dagger(CZGate) == CZGate
assert pow(CZGate, 1) == Dagger(CZGate)
assert Dagger(CZGate) == CZGate.inverse()
assert Dagger(CPhaseGate) != CPhaseGate
assert Dagger(CPhaseGate) == CPhaseGate.inverse()
assert Dagger(CPhaseGate) == pow(CPhaseGate, -1)
assert pow(CPhaseGate, -1) == CPhaseGate.inverse()
def test_UGate_CGate_combo():
a, b, c, d = symbols('a,b,c,d')
uMat = Matrix([[a, b], [c, d]])
cMat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, a, b], [0, 0, c, d]])
# Test basic case where gate exists in 1-qubit space.
u1 = UGate((0,), uMat)
cu1 = CGate(1, u1)
assert represent(cu1, nqubits=2) == cMat
assert qapply(cu1*Qubit('10')) == a*Qubit('10') + c*Qubit('11')
assert qapply(cu1*Qubit('11')) == b*Qubit('10') + d*Qubit('11')
assert qapply(cu1*Qubit('01')) == Qubit('01')
assert qapply(cu1*Qubit('00')) == Qubit('00')
# Test case where gate exists in a larger space.
u2 = UGate((1,), uMat)
u2Rep = represent(u2, nqubits=2)
for i in range(4):
assert u2Rep*qubit_to_matrix(IntQubit(i, 2)) == \
qubit_to_matrix(qapply(u2*IntQubit(i, 2)))
def test_represent_hadamard():
"""Test the representation of the hadamard gate."""
circuit = HadamardGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
# Check that the answers are same to within an epsilon.
assert answer == Matrix([sqrt2_inv, sqrt2_inv, 0, 0])
def test_represent_xgate():
"""Test the representation of the X gate."""
circuit = XGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert Matrix([0, 1, 0, 0]) == answer
def test_represent_ygate():
"""Test the representation of the Y gate."""
circuit = YGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert answer[0] == 0 and answer[1] == I and \
answer[2] == 0 and answer[3] == 0
def test_represent_zgate():
"""Test the representation of the Z gate."""
circuit = ZGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert Matrix([1, 0, 0, 0]) == answer
def test_represent_phasegate():
"""Test the representation of the S gate."""
circuit = PhaseGate(0)*Qubit('01')
answer = represent(circuit, nqubits=2)
assert Matrix([0, I, 0, 0]) == answer
def test_represent_tgate():
"""Test the representation of the T gate."""
circuit = TGate(0)*Qubit('01')
assert Matrix([0, exp(I*pi/4), 0, 0]) == represent(circuit, nqubits=2)
def test_compound_gates():
"""Test a compound gate representation."""
circuit = YGate(0)*ZGate(0)*XGate(0)*HadamardGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert Matrix([I/sqrt(2), I/sqrt(2), 0, 0]) == answer
def test_cnot_gate():
"""Test the CNOT gate."""
circuit = CNotGate(1, 0)
assert represent(circuit, nqubits=2) == \
Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
circuit = circuit*Qubit('111')
assert matrix_to_qubit(represent(circuit, nqubits=3)) == \
qapply(circuit)
circuit = CNotGate(1, 0)
assert Dagger(circuit) == circuit
assert Dagger(Dagger(circuit)) == circuit
assert circuit*circuit == 1
def test_gate_sort():
"""Test gate_sort."""
for g in (X, Y, Z, H, S, T):
assert gate_sort(g(2)*g(1)*g(0)) == g(0)*g(1)*g(2)
e = gate_sort(X(1)*H(0)**2*CNOT(0, 1)*X(1)*X(0))
assert e == H(0)**2*CNOT(0, 1)*X(0)*X(1)**2
assert gate_sort(Z(0)*X(0)) == -X(0)*Z(0)
assert gate_sort(Z(0)*X(0)**2) == X(0)**2*Z(0)
assert gate_sort(Y(0)*H(0)) == -H(0)*Y(0)
assert gate_sort(Y(0)*X(0)) == -X(0)*Y(0)
assert gate_sort(Z(0)*Y(0)) == -Y(0)*Z(0)
assert gate_sort(T(0)*S(0)) == S(0)*T(0)
assert gate_sort(Z(0)*S(0)) == S(0)*Z(0)
assert gate_sort(Z(0)*T(0)) == T(0)*Z(0)
assert gate_sort(Z(0)*CNOT(0, 1)) == CNOT(0, 1)*Z(0)
assert gate_sort(S(0)*CNOT(0, 1)) == CNOT(0, 1)*S(0)
assert gate_sort(T(0)*CNOT(0, 1)) == CNOT(0, 1)*T(0)
assert gate_sort(X(1)*CNOT(0, 1)) == CNOT(0, 1)*X(1)
# This takes a long time and should only be uncommented once in a while.
# nqubits = 5
# ngates = 10
# trials = 10
# for i in range(trials):
# c = random_circuit(ngates, nqubits)
# assert represent(c, nqubits=nqubits) == \
# represent(gate_sort(c), nqubits=nqubits)
def test_gate_simp():
"""Test gate_simp."""
e = H(0)*X(1)*H(0)**2*CNOT(0, 1)*X(1)**3*X(0)*Z(3)**2*S(4)**3
assert gate_simp(e) == H(0)*CNOT(0, 1)*S(4)*X(0)*Z(4)
assert gate_simp(X(0)*X(0)) == 1
assert gate_simp(Y(0)*Y(0)) == 1
assert gate_simp(Z(0)*Z(0)) == 1
assert gate_simp(H(0)*H(0)) == 1
assert gate_simp(T(0)*T(0)) == S(0)
assert gate_simp(S(0)*S(0)) == Z(0)
assert gate_simp(Integer(1)) == Integer(1)
assert gate_simp(X(0)**2 + Y(0)**2) == Integer(2)
def test_swap_gate():
"""Test the SWAP gate."""
swap_gate_matrix = Matrix(
((1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 0, 0, 1)))
assert represent(SwapGate(1, 0).decompose(), nqubits=2) == swap_gate_matrix
assert qapply(SwapGate(1, 3)*Qubit('0010')) == Qubit('1000')
nqubits = 4
for i in range(nqubits):
for j in range(i):
assert represent(SwapGate(i, j), nqubits=nqubits) == \
represent(SwapGate(i, j).decompose(), nqubits=nqubits)
def test_one_qubit_commutators():
"""Test single qubit gate commutation relations."""
for g1 in (IdentityGate, X, Y, Z, H, T, S):
for g2 in (IdentityGate, X, Y, Z, H, T, S):
e = Commutator(g1(0), g2(0))
a = matrix_to_zero(represent(e, nqubits=1, format='sympy'))
b = matrix_to_zero(represent(e.doit(), nqubits=1, format='sympy'))
assert a == b
e = Commutator(g1(0), g2(1))
assert e.doit() == 0
def test_one_qubit_anticommutators():
"""Test single qubit gate anticommutation relations."""
for g1 in (IdentityGate, X, Y, Z, H):
for g2 in (IdentityGate, X, Y, Z, H):
e = AntiCommutator(g1(0), g2(0))
a = matrix_to_zero(represent(e, nqubits=1, format='sympy'))
b = matrix_to_zero(represent(e.doit(), nqubits=1, format='sympy'))
assert a == b
e = AntiCommutator(g1(0), g2(1))
a = matrix_to_zero(represent(e, nqubits=2, format='sympy'))
b = matrix_to_zero(represent(e.doit(), nqubits=2, format='sympy'))
assert a == b
def test_cnot_commutators():
"""Test commutators of involving CNOT gates."""
assert Commutator(CNOT(0, 1), Z(0)).doit() == 0
assert Commutator(CNOT(0, 1), T(0)).doit() == 0
assert Commutator(CNOT(0, 1), S(0)).doit() == 0
assert Commutator(CNOT(0, 1), X(1)).doit() == 0
assert Commutator(CNOT(0, 1), CNOT(0, 1)).doit() == 0
assert Commutator(CNOT(0, 1), CNOT(0, 2)).doit() == 0
assert Commutator(CNOT(0, 2), CNOT(0, 1)).doit() == 0
assert Commutator(CNOT(1, 2), CNOT(1, 0)).doit() == 0
def test_random_circuit():
c = random_circuit(10, 3)
assert isinstance(c, Mul)
m = represent(c, nqubits=3)
assert m.shape == (8, 8)
assert isinstance(m, Matrix)
def test_hermitian_XGate():
x = XGate(1, 2)
x_dagger = Dagger(x)
assert (x == x_dagger)
def test_hermitian_YGate():
y = YGate(1, 2)
y_dagger = Dagger(y)
assert (y == y_dagger)
def test_hermitian_ZGate():
z = ZGate(1, 2)
z_dagger = Dagger(z)
assert (z == z_dagger)
def test_unitary_XGate():
x = XGate(1, 2)
x_dagger = Dagger(x)
assert (x*x_dagger == 1)
def test_unitary_YGate():
y = YGate(1, 2)
y_dagger = Dagger(y)
assert (y*y_dagger == 1)
def test_unitary_ZGate():
z = ZGate(1, 2)
z_dagger = Dagger(z)
assert (z*z_dagger == 1)
|
mit
|
tushortz/Zilch
|
tests/scorer_test.py
|
1
|
1159
|
import unittest
from zilch.die import Die
from zilch.scorer import Scorer
class TestScorerMethod(unittest.TestCase):
def test_score_one_of_a_kind(self):
die_value = Die(6).get_value()
value = Scorer().score_one_of_a_kind(die_value).score
self.assertEqual(value, 0)
die_value = Die(1).get_value()
value = Scorer().score_one_of_a_kind(die_value).score
self.assertEqual(value, 100)
die_value = Die(5).get_value()
value = Scorer().score_one_of_a_kind(die_value).score
self.assertEqual(value, 50)
def test_score_three_of_a_kind(self):
die_value = Die(1).get_value()
value = Scorer().score_three_of_a_kind(die_value).score
self.assertEqual(value, 1000)
die_value = Die(5).get_value()
value = Scorer().score_three_of_a_kind(die_value).score
self.assertEqual(value, 500)
def test_score_four_of_a_kind_and_a_pair(self):
die_value = [4, 2]
value = Scorer().score_four_of_a_kind_and_a_pair(die_value).score
print(value)
self.assertEqual(value, 500)
if __name__ == '__main__':
unittest.main()
|
mit
|
Maccimo/intellij-community
|
python/helpers/pydev/build_tools/build_binaries_osx.py
|
9
|
2204
|
from __future__ import unicode_literals
import os
import subprocess
import sys
from build import BINARY_DIRS, remove_binaries
miniconda64_envs = os.getenv('MINICONDA64_ENVS')
python_installations = [
r'%s/py27_64/bin/python' % miniconda64_envs,
r'%s/py35_64/bin/python' % miniconda64_envs,
r'%s/py36_64/bin/python' % miniconda64_envs,
r'%s/py37_64/bin/python' % miniconda64_envs,
r'%s/py38_64/bin/python' % miniconda64_envs,
r'%s/py39_64/bin/python' % miniconda64_envs,
]
root_dir = os.path.dirname(os.path.dirname(__file__))
def list_binaries():
for binary_dir in BINARY_DIRS:
for f in os.listdir(os.path.join(root_dir, binary_dir)):
if f.endswith('.so'):
yield f
def extract_version(python_install):
return python_install.split('/')[-3][2:]
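# e.g. extract_version('<envs>/py37_64/bin/python') -> 'py37_64'[2:] -> '37_64'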
def main():
from generate_code import generate_dont_trace_files
from generate_code import generate_cython_module
# First, make sure that our code is up to date.
generate_dont_trace_files()
generate_cython_module()
for python_install in python_installations:
assert os.path.exists(python_install)
remove_binaries(['.so'])
for f in list_binaries():
raise AssertionError('Binary not removed: %s' % (f,))
for i, python_install in enumerate(python_installations):
new_name = 'pydevd_cython_%s_%s' % (sys.platform, extract_version(python_install))
args = [
python_install, os.path.join(root_dir, 'build_tools', 'build.py'), '--no-remove-binaries', '--target-pyd-name=%s' % new_name, '--force-cython']
if i != 0:
args.append('--no-regenerate-files')
version_number = extract_version(python_install)
if version_number.startswith('36') or version_number.startswith('37') or version_number.startswith('38') \
or version_number.startswith('39'):
name_frame_eval = 'pydevd_frame_evaluator_%s_%s' % (sys.platform, extract_version(python_install))
args.append('--target-pyd-frame-eval=%s' % name_frame_eval)
print('Calling: %s' % (' '.join(args)))
subprocess.check_call(args)
if __name__ == '__main__':
main()
|
apache-2.0
|
yenliangl/bitcoin
|
test/functional/test_framework/netutil.py
|
16
|
5102
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import struct
import array
import os
from binascii import unhexlify, hexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
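# readlink targets look like 'socket:[48465]'; the slice above keeps only the
# digits between the brackets (the inode number shown here is illustrative).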
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
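# Worked example (illustrative, little-endian host): the kernel lists
# 127.0.0.1:8333 as "0100007F:208D", for which _convert_ip_port returns
# ('7f000001', 8333).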
def netstat(typ='tcp'):
'''
Return a list with the status of TCP connections on a Linux system.
To get the pid of all network processes running on the system, this script
must be run as superuser.
'''
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
import fcntl # Linux only, so only import when required
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tobytes()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
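# Each SIOCGIFCONF record is struct_size bytes: a 16-byte interface name
# followed by a sockaddr, with the IPv4 address at bytes 20-23 of the record
# (layout assumed from the slicing above).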
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
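# Illustrative results given the conversion above:
#   addr_to_hex('127.0.0.1') -> '7f000001'
#   addr_to_hex('::1')       -> '00000000000000000000000000000001'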
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
import socket
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
|
mit
|
scion-network/scion
|
src/scion/service/scion_management.py
|
1
|
7030
|
#!/usr/bin/env python
__author__ = 'Michael Meisinger'
from pyon.public import log, CFG, BadRequest, EventPublisher, Conflict, Unauthorized, AssociationQuery, NotFound, PRED, OT, ResourceQuery, RT
from pyon.util.containers import is_valid_identifier, parse_ion_ts, BASIC_VALID
from ion.service.identity_management_service import IdentityUtils
from scion.service.scion_base import ScionManagementServiceBase
from scion.service.scion_instrument import ScionInstrumentOps
from interface.objects import ActorIdentity, UserIdentityDetails, Credentials, ContactInformation
from interface.objects import MediaResponse, ResourceVisibilityEnum
EMAIL_VALID = BASIC_VALID + "@.-"
class ScionManagementService(ScionInstrumentOps):
def on_init(self):
log.info("SciON Management service starting")
ScionManagementServiceBase.on_init(self)
# Initialize helpers
ScionInstrumentOps._on_init(self)
# -------------------------------------------------------------------------
def read_user(self, user_id=''):
user_obj = self._validate_resource_id("user_id", user_id, RT.ActorIdentity)
user_obj.credentials = None
return user_obj
def register_user(self, first_name='', last_name='', username='', password='', email=''):
return self.define_user(first_name=first_name, last_name=last_name, username=username,
password=password, email=email)
def define_user(self, user_id='', first_name='', last_name='', username='', password='',
email='', attributes=None):
if user_id:
raise NotImplementedError("Update not supported: user_id=%s" % user_id)
if not email:
raise BadRequest('Email is required')
username = username or email
user = self._get_user_by_email(email)
if user:
raise BadRequest("Email already taken")
if not username or not is_valid_identifier(username, valid_chars=EMAIL_VALID):
raise BadRequest("Argument username invalid: %s" % username)
if attributes and type(attributes) is not dict:
raise BadRequest("Argument attributes invalid type")
if not first_name:
first_name = username
attributes = attributes or {}
full_name = ("%s %s" % (first_name, last_name)) if last_name else first_name
IdentityUtils.check_password_policy(password)
contact = ContactInformation(individual_names_given=first_name, individual_name_family=last_name, email=email)
user_profile = UserIdentityDetails(contact=contact, profile=attributes)
actor_obj = ActorIdentity(name=full_name, details=user_profile)
# Support fast setting of credentials without expensive compute of bcrypt hash, for quick preload
pwd_salt, pwd_hash = None, None
if attributes and "scion_init_pwdsalt" in attributes and "scion_init_pwdhash" in attributes:
pwd_salt, pwd_hash = attributes.pop("scion_init_pwdsalt"), attributes.pop("scion_init_pwdhash")
user_exists = self.idm_client.is_user_existing(username)
if user_exists:
raise BadRequest("Username already taken")
actor_id = self.idm_client.create_actor_identity(actor_obj)
if pwd_salt and pwd_hash:
# Add to credentials
actor_obj1 = self.rr.read(actor_id)
cred_obj = None
for cred in actor_obj1.credentials:
if cred.username == username:
cred_obj = cred
break
if not cred_obj:
cred_obj = Credentials()
cred_obj.username = username
actor_obj1.credentials.append(cred_obj)
actor_obj1.alt_ids.append("UNAME:" + username)
cred_obj.identity_provider = "SciON"
cred_obj.authentication_service = "SciON IdM"
cred_obj.password_salt = pwd_salt
cred_obj.password_hash = pwd_hash
self.rr.update(actor_obj1)
else:
self.idm_client.set_actor_credentials(actor_id, username, password)
return actor_id
def _get_user_by_email(self, email):
user_objs_rq = ResourceQuery()
user_objs_rq.set_filter(
user_objs_rq.filter_type(RT.ActorIdentity),
user_objs_rq.filter_attribute('details.contact.email', email))
users = self.rr.find_resources_ext(query=user_objs_rq.get_query(), id_only=False)
if users:
return users[0]
return None
def update_user_contact(self, user_id='', contact=None, contact_entries=None):
user_obj = self._validate_resource_id("user_id", user_id, RT.ActorIdentity)
self._validate_arg_obj("contact", contact, OT.ContactInformation, optional=True)
if contact is None and not contact_entries:
raise BadRequest("Missing contact arguments")
address_fields = ("street_address", "city", "administrative_area", "postal_code", "country")
old_contact = user_obj.details.contact
old_address_parts = [getattr(old_contact, addr_attr) for addr_attr in address_fields if getattr(old_contact, addr_attr)]
old_address_str = ", ".join(old_address_parts)
if contact:
user_obj.details.contact = contact
elif contact_entries:
for attr, att_val in contact_entries.iteritems():
if att_val and hasattr(user_obj.details.contact, attr):
setattr(user_obj.details.contact, attr, att_val)
user_obj.details.contact._validate()
self.rr.update(user_obj)
def delete_user(self, user_id=''):
user_obj = self._validate_resource_id("user_id", user_id, RT.ActorIdentity)
self.idm_client.delete_actor_identity(user_id)
def update_user_profile(self, user_id='', profile_entries=None):
profile_entries = profile_entries or {}
user_id = self._as_actor_id(user_id)
user_obj = self._validate_resource_id("user_id", user_id, RT.ActorIdentity)
user_obj.details.profile.update(profile_entries)
self.rr.update(user_obj)
def get_user_profile(self, user_id='', settings_filter=None):
settings_filter = settings_filter or []
user_id = self._as_actor_id(user_id)
user_obj = self._validate_resource_id("user_id", user_id, RT.ActorIdentity)
profile_data = user_obj.details.profile
if settings_filter:
profile_data = {k: v for k, v in profile_data.items() if k in settings_filter}
return profile_data
def change_password(self, old_pwd='', new_pwd=''):
user_id = self._get_actor_id()
user_obj = self._validate_resource_id("user_id", user_id, RT.ActorIdentity)
self.idm_client.check_actor_credentials(user_obj.credentials[0].username, old_pwd)
IdentityUtils.check_password_policy(new_pwd)
self.idm_client.set_actor_credentials(user_id, user_obj.credentials[0].username , new_pwd)
|
bsd-2-clause
|
mcsosa121/cafa
|
cafaenv/lib/python2.7/site-packages/django/contrib/sessions/backends/db.py
|
227
|
3637
|
import logging
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.functional import cached_property
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
@classmethod
def get_model_class(cls):
# Avoids a circular import and allows importing SessionStore when
# django.contrib.sessions is not in INSTALLED_APPS.
from django.contrib.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def load(self):
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
return {}
def exists(self, session_key):
return self.model.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
return
def create_model_instance(self, data):
"""
Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
"""
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
)
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
using = router.db_for_write(self.model, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
self.model.objects.get(session_key=session_key).delete()
except self.model.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
cls.get_model_class().objects.filter(expire_date__lt=timezone.now()).delete()
|
mit
|
dslomov/intellij-community
|
python/lib/Lib/site-packages/django/contrib/auth/views.py
|
71
|
10263
|
import re
import urlparse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
# Avoid shadowing the login() view below.
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.tokens import default_token_generator
from django.views.decorators.csrf import csrf_protect
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.sites.models import get_current_site
from django.http import HttpResponseRedirect, Http404, QueryDict
from django.template import RequestContext
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.views.decorators.cache import never_cache
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
"""Displays the login form and handles the login action."""
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.method == "POST":
form = authentication_form(data=request.POST)
if form.is_valid():
netloc = urlparse.urlparse(redirect_to)[1]
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Heavier security check -- don't allow redirection to a different
# host.
elif netloc and netloc != request.get_host():
redirect_to = settings.LOGIN_REDIRECT_URL
# Okay, security checks complete. Log the user in.
auth_login(request, form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
request.session.set_test_cookie()
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
current_app=None, extra_context=None):
"Logs out the user and displays 'You are logged out' message."
from django.contrib.auth import logout
logout(request)
if next_page is None:
redirect_to = request.REQUEST.get(redirect_field_name, '')
if redirect_to:
return HttpResponseRedirect(redirect_to)
else:
current_site = get_current_site(request)
context = {
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out')
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
else:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page or request.path)
def logout_then_login(request, login_url=None, current_app=None, extra_context=None):
"Logs out the user if he is logged in. Then redirects to the log-in page."
if not login_url:
login_url = settings.LOGIN_URL
return logout(request, login_url, current_app=current_app, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"Redirects the user to the login page, passing the given 'next' page"
if not login_url:
login_url = settings.LOGIN_URL
login_url_parts = list(urlparse.urlparse(login_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlparse.urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@csrf_protect
def password_reset(request, is_admin_site=False,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
current_app=None,
extra_context=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'request': request,
}
if is_admin_site:
opts = dict(opts, domain_override=request.META['HTTP_HOST'])
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def password_reset_done(request,
template_name='registration/password_reset_done.html',
current_app=None, extra_context=None):
context = {}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
# Doesn't need csrf_protect since no-one can guess the URL
@never_cache
def password_reset_confirm(request, uidb36=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
current_app=None, extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
assert uidb36 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(None)
else:
validlink = False
form = None
context = {
'form': form,
'validlink': validlink,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
current_app=None, extra_context=None):
context = {
'login_url': settings.LOGIN_URL
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
@csrf_protect
@login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
current_app=None, extra_context=None):
if post_change_redirect is None:
post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def password_change_done(request,
template_name='registration/password_change_done.html',
current_app=None, extra_context=None):
context = {}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
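# Illustrative sketch, not part of the original module: one way these views
# could be wired into a URLconf for this era of Django. The URL regexes and
# names below are assumptions made for the example.
#
#     from django.conf.urls.defaults import patterns, url
#
#     urlpatterns = patterns('django.contrib.auth.views',
#         url(r'^login/$', 'login', name='login'),
#         url(r'^logout/$', 'logout', name='logout'),
#         url(r'^password_reset/$', 'password_reset', name='password_reset'),
#         url(r'^password_reset/done/$', 'password_reset_done', name='password_reset_done'),
#         url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'password_reset_confirm', name='password_reset_confirm'),
#         url(r'^reset/done/$', 'password_reset_complete', name='password_reset_complete'),
#     )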
|
apache-2.0
|
shaunstanislaus/ZeroNet
|
src/lib/pybitcointools/test_stealth.py
|
36
|
4274
|
import bitcoin as bc
import sys
import unittest
class TestStealth(unittest.TestCase):
def setUp(self):
if sys.getrecursionlimit() < 1000:
sys.setrecursionlimit(1000)
self.addr = 'vJmtjxSDxNPXL4RNapp9ARdqKz3uJyf1EDGjr1Fgqs9c8mYsVH82h8wvnA4i5rtJ57mr3kor1EVJrd4e5upACJd588xe52yXtzumxj'
self.scan_pub = '025e58a31122b38c86abc119b9379fe247410aee87a533f9c07b189aef6c3c1f52'
self.scan_priv = '3e49e7257cb31db997edb1cf8299af0f37e2663e2260e4b8033e49d39a6d02f2'
self.spend_pub = '03616562c98e7d7b74be409a787cec3a912122f3fb331a9bee9b0b73ce7b9f50af'
self.spend_priv = 'aa3db0cfb3edc94de4d10f873f8190843f2a17484f6021a95a7742302c744748'
self.ephem_pub = '03403d306ec35238384c7e340393335f9bc9bb4a2e574eb4e419452c4ea19f14b0'
self.ephem_priv = '9e63abaf8dcd5ea3919e6de0b6c544e00bf51bf92496113a01d6e369944dc091'
self.shared_secret = 'a4047ee231f4121e3a99a3a3378542e34a384b865a9917789920e1f13ffd91c6'
self.pay_pub = '02726112ad39cb6bf848b1b1ef30b88e35286bf99f746c2be575f96c0e02a9357c'
self.pay_priv = '4e422fb1e5e1db6c1f6ab32a7706d368ceb385e7fab098e633c5c5949c3b97cd'
self.testnet_addr = 'waPUuLLykSnY3itzf1AyrQZm42F7KyB7SR5zpfqmnzPXWhx9kXLzV3EcyqzDdpTwngiyCCMUqztS9S1d7XJs3JMt3MsHPDpBCudvx9'
def test_address_encoding(self):
sc_pub, sp_pub = bc.basic_stealth_address_to_pubkeys(self.addr)
self.assertEqual(sc_pub, self.scan_pub)
self.assertEqual(sp_pub, self.spend_pub)
stealth_addr2 = bc.pubkeys_to_basic_stealth_address(sc_pub, sp_pub)
self.assertEqual(stealth_addr2, self.addr)
magic_byte_testnet = 43
sc_pub, sp_pub = bc.basic_stealth_address_to_pubkeys(self.testnet_addr)
self.assertEqual(sc_pub, self.scan_pub)
self.assertEqual(sp_pub, self.spend_pub)
stealth_addr2 = bc.pubkeys_to_basic_stealth_address(sc_pub, sp_pub, magic_byte_testnet)
self.assertEqual(stealth_addr2, self.testnet_addr)
def test_shared_secret(self):
sh_sec = bc.shared_secret_sender(self.scan_pub, self.ephem_priv)
self.assertEqual(sh_sec, self.shared_secret)
sh_sec2 = bc.shared_secret_receiver(self.ephem_pub, self.scan_priv)
self.assertEqual(sh_sec2, self.shared_secret)
def test_uncover_pay_keys(self):
pub = bc.uncover_pay_pubkey_sender(self.scan_pub, self.spend_pub, self.ephem_priv)
pub2 = bc.uncover_pay_pubkey_receiver(self.scan_priv, self.spend_pub, self.ephem_pub)
self.assertEqual(pub, self.pay_pub)
self.assertEqual(pub2, self.pay_pub)
priv = bc.uncover_pay_privkey(self.scan_priv, self.spend_priv, self.ephem_pub)
self.assertEqual(priv, self.pay_priv)
def test_stealth_metadata_script(self):
nonce = int('deadbeef', 16)
script = bc.mk_stealth_metadata_script(self.ephem_pub, nonce)
self.assertEqual(script[6:], 'deadbeef' + self.ephem_pub)
eph_pub = bc.ephem_pubkey_from_tx_script(script)
self.assertEqual(eph_pub, self.ephem_pub)
def test_stealth_tx_outputs(self):
nonce = int('deadbeef', 16)
value = 10**8
outputs = bc.mk_stealth_tx_outputs(self.addr, value, self.ephem_priv, nonce)
self.assertEqual(outputs[0]['value'], 0)
self.assertEqual(outputs[0]['script'], '6a2606deadbeef' + self.ephem_pub)
self.assertEqual(outputs[1]['address'], bc.pubkey_to_address(self.pay_pub))
self.assertEqual(outputs[1]['value'], value)
outputs = bc.mk_stealth_tx_outputs(self.testnet_addr, value, self.ephem_priv, nonce, 'testnet')
self.assertEqual(outputs[0]['value'], 0)
self.assertEqual(outputs[0]['script'], '6a2606deadbeef' + self.ephem_pub)
self.assertEqual(outputs[1]['address'], bc.pubkey_to_address(self.pay_pub, 111))
self.assertEqual(outputs[1]['value'], value)
self.assertRaises(Exception, bc.mk_stealth_tx_outputs, self.testnet_addr, value, self.ephem_priv, nonce, 'btc')
self.assertRaises(Exception, bc.mk_stealth_tx_outputs, self.addr, value, self.ephem_priv, nonce, 'testnet')
if __name__ == '__main__':
unittest.main()
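# Illustrative note, not part of the original file: with the pybitcointools
# package importable as 'bitcoin', these tests can be run directly, e.g.:
#
#     $ python test_stealth.py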
|
gpl-2.0
|
Simran-B/arangodb
|
3rdParty/V8-4.3.61/test/mjsunit/testcfg.py
|
13
|
4019
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class MjsunitTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(MjsunitTestSuite, self).__init__(name, root)
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if filename.endswith(".js") and filename != "mjsunit.js":
testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
def GetFlagsForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
flags = [] + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += match.strip().split()
files_list = [] # List of file names to append to command arguments.
    files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'.
while True:
if files_match:
files_list += files_match.group(1).strip().split()
files_match = FILES_PATTERN.search(source, files_match.end())
else:
break
files = [ os.path.normpath(os.path.join(self.root, '..', '..', f))
for f in files_list ]
testfilename = os.path.join(self.root, testcase.path + self.suffix())
if SELF_SCRIPT_PATTERN.search(source):
env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")]
files = env + files
if not context.no_harness:
files.append(os.path.join(self.root, "mjsunit.js"))
if MODULE_PATTERN.search(source):
files.append("--module")
files.append(testfilename)
flags += files
if context.isolates:
flags.append("--isolate")
flags += files
return testcase.flags + flags
def GetSourceForTest(self, testcase):
filename = os.path.join(self.root, testcase.path + self.suffix())
with open(filename) as f:
return f.read()
def GetSuite(name, root):
return MjsunitTestSuite(name, root)
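# Illustrative sketch, not part of the original file: the kind of test header
# the regexes above (FLAGS_PATTERN, FILES_PATTERN, SELF_SCRIPT_PATTERN,
# MODULE_PATTERN) match in an mjsunit .js test. The concrete flag and file
# names are made up for the example.
#
#     // Flags: --allow-natives-syntax --expose-gc
#     // Files: tools/codemap.js tools/consarray.js
#     // Env: TEST_FILE_NAME
#     // MODULE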
|
apache-2.0
|
mrjacobagilbert/gnuradio
|
gr-utils/modtool/cli/disable.py
|
6
|
1119
|
#
# Copyright 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
""" Disable blocks module """
import click
from ..core import get_block_candidates, ModToolDisable
from ..tools import SequenceCompleter
from .base import common_params, block_name, run, cli_input
@click.command('disable', short_help=ModToolDisable.description)
@common_params
@block_name
def cli(**kwargs):
"""Disable a block (comments out CMake entries for files)"""
kwargs['cli'] = True
self = ModToolDisable(**kwargs)
click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
get_pattern(self)
run(self)
def get_pattern(self):
""" Get the regex pattern for block(s) to be disabled """
if self.info['pattern'] is None:
block_candidates = get_block_candidates()
with SequenceCompleter(block_candidates):
self.info['pattern'] = cli_input('Which blocks do you want to disable? (Regex): ')
if not self.info['pattern'] or self.info['pattern'].isspace():
self.info['pattern'] = '.'
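# Illustrative sketch, not part of the original file: typical shell invocations
# of this subcommand from inside an out-of-tree module; the block name is a
# made-up example.
#
#     $ gr_modtool disable howto_square_ff   # disable blocks matching the regex
#     $ gr_modtool disable                   # no pattern given: prompts via cli_input()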
|
gpl-3.0
|
ononeor12/python-social-auth
|
social/actions.py
|
63
|
4788
|
from social.p3 import quote
from social.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, partial_pipeline_data, setting_url
def do_auth(backend, redirect_name='next'):
# Clean any partial pipeline data
backend.strategy.clean_partial_pipeline()
# Save any defined next value into session
data = backend.strategy.request_data(merge=False)
# Save extra data into session.
for field_name in backend.setting('FIELDS_STORED_IN_SESSION', []):
if field_name in data:
backend.strategy.session_set(field_name, data[field_name])
if redirect_name in data:
# Check and sanitize a user-defined GET/POST next field value
redirect_uri = data[redirect_name]
if backend.setting('SANITIZE_REDIRECTS', True):
redirect_uri = sanitize_redirect(backend.strategy.request_host(),
redirect_uri)
backend.strategy.session_set(
redirect_name,
redirect_uri or backend.setting('LOGIN_REDIRECT_URL')
)
return backend.start()
def do_complete(backend, login, user=None, redirect_name='next',
*args, **kwargs):
data = backend.strategy.request_data()
is_authenticated = user_is_authenticated(user)
user = is_authenticated and user or None
partial = partial_pipeline_data(backend, user, *args, **kwargs)
if partial:
xargs, xkwargs = partial
user = backend.continue_pipeline(*xargs, **xkwargs)
else:
user = backend.complete(user=user, *args, **kwargs)
# pop redirect value before the session is trashed on login(), but after
# the pipeline so that the pipeline can change the redirect if needed
redirect_value = backend.strategy.session_get(redirect_name, '') or \
data.get(redirect_name, '')
user_model = backend.strategy.storage.user.user_model()
if user and not isinstance(user, user_model):
return user
if is_authenticated:
if not user:
url = setting_url(backend, redirect_value, 'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'NEW_ASSOCIATION_REDIRECT_URL',
'LOGIN_REDIRECT_URL')
elif user:
if user_is_active(user):
# catch is_new/social_user in case login() resets the instance
is_new = getattr(user, 'is_new', False)
social_user = user.social_user
login(backend, user, social_user)
# store last login backend name in session
backend.strategy.session_set('social_auth_last_login_backend',
social_user.provider)
if is_new:
url = setting_url(backend,
'NEW_USER_REDIRECT_URL',
redirect_value,
'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'LOGIN_REDIRECT_URL')
else:
if backend.setting('INACTIVE_USER_LOGIN', False):
social_user = user.social_user
login(backend, user, social_user)
url = setting_url(backend, 'INACTIVE_USER_URL', 'LOGIN_ERROR_URL',
'LOGIN_URL')
else:
url = setting_url(backend, 'LOGIN_ERROR_URL', 'LOGIN_URL')
if redirect_value and redirect_value != url:
redirect_value = quote(redirect_value)
url += ('?' in url and '&' or '?') + \
'{0}={1}'.format(redirect_name, redirect_value)
if backend.setting('SANITIZE_REDIRECTS', True):
url = sanitize_redirect(backend.strategy.request_host(), url) or \
backend.setting('LOGIN_REDIRECT_URL')
return backend.strategy.redirect(url)
def do_disconnect(backend, user, association_id=None, redirect_name='next',
*args, **kwargs):
partial = partial_pipeline_data(backend, user, *args, **kwargs)
if partial:
xargs, xkwargs = partial
if association_id and not xkwargs.get('association_id'):
xkwargs['association_id'] = association_id
response = backend.disconnect(*xargs, **xkwargs)
else:
response = backend.disconnect(user=user, association_id=association_id,
*args, **kwargs)
if isinstance(response, dict):
response = backend.strategy.redirect(
backend.strategy.request_data().get(redirect_name, '') or
backend.setting('DISCONNECT_REDIRECT_URL') or
backend.setting('LOGIN_REDIRECT_URL')
)
return response
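# Illustrative sketch, not part of the original module: how a web-framework
# view might drive do_auth()/do_complete(). The load_strategy/load_backend
# helpers are assumptions about the surrounding integration, not defined in
# this file.
#
#     from social.apps.django_app.utils import load_strategy, load_backend
#
#     def begin(request, backend_name):
#         strategy = load_strategy(request)
#         backend = load_backend(strategy, backend_name, redirect_uri='/complete/')
#         return do_auth(backend, redirect_name='next')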
|
bsd-3-clause
|
djrscally/eve-wspace
|
evewspace/Recruitment/migrations/0001_initial.py
|
18
|
14593
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Interest'
db.create_table('Recruitment_interest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('Recruitment', ['Interest'])
# Adding model 'Action'
db.create_table('Recruitment_action', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('Recruitment', ['Action'])
# Adding model 'Application'
db.create_table('Recruitment_application', (
('applicant', self.gf('django.db.models.fields.related.OneToOneField')(related_name='application', unique=True, primary_key=True, to=orm['auth.User'])),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
('killboard', self.gf('django.db.models.fields.CharField')(max_length=100)),
('closetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('disposition', self.gf('django.db.models.fields.IntegerField')()),
('intelclear', self.gf('django.db.models.fields.DateTimeField')()),
('standingsclear', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('Recruitment', ['Application'])
# Adding M2M table for field interests on 'Application'
db.create_table('Recruitment_application_interests', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('application', models.ForeignKey(orm['Recruitment.application'], null=False)),
('interest', models.ForeignKey(orm['Recruitment.interest'], null=False))
))
db.create_unique('Recruitment_application_interests', ['application_id', 'interest_id'])
# Adding model 'AppVote'
db.create_table('Recruitment_appvote', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(related_name='votes', to=orm['Recruitment.Application'])),
('vote', self.gf('django.db.models.fields.related.ForeignKey')(related_name='appvotes', to=orm['auth.User'])),
('disposition', self.gf('django.db.models.fields.IntegerField')()),
('note', self.gf('django.db.models.fields.TextField')()),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('Recruitment', ['AppVote'])
# Adding model 'AppAction'
db.create_table('Recruitment_appaction', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(related_name='actions', to=orm['Recruitment.Application'])),
('action', self.gf('django.db.models.fields.related.ForeignKey')(related_name='instances', to=orm['Recruitment.Action'])),
('note', self.gf('django.db.models.fields.TextField')()),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('Recruitment', ['AppAction'])
# Adding model 'Interview'
db.create_table('Recruitment_interview', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(related_name='interviews', to=orm['Recruitment.Application'])),
('interviewer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='interviews', to=orm['auth.User'])),
('chatlog', self.gf('django.db.models.fields.TextField')()),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('Recruitment', ['Interview'])
# Adding model 'AppQuestion'
db.create_table('Recruitment_appquestion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('Recruitment', ['AppQuestion'])
# Adding model 'AppResponse'
db.create_table('Recruitment_appresponse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(related_name='responses', to=orm['Recruitment.Application'])),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='responses', to=orm['Recruitment.AppQuestion'])),
('response', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('Recruitment', ['AppResponse'])
# Adding model 'StandigsRequirement'
db.create_table('Recruitment_standigsrequirement', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('entity', self.gf('django.db.models.fields.CharField')(max_length=100)),
('standing', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('entitytype', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('Recruitment', ['StandigsRequirement'])
def backwards(self, orm):
# Deleting model 'Interest'
db.delete_table('Recruitment_interest')
# Deleting model 'Action'
db.delete_table('Recruitment_action')
# Deleting model 'Application'
db.delete_table('Recruitment_application')
# Removing M2M table for field interests on 'Application'
db.delete_table('Recruitment_application_interests')
# Deleting model 'AppVote'
db.delete_table('Recruitment_appvote')
# Deleting model 'AppAction'
db.delete_table('Recruitment_appaction')
# Deleting model 'Interview'
db.delete_table('Recruitment_interview')
# Deleting model 'AppQuestion'
db.delete_table('Recruitment_appquestion')
# Deleting model 'AppResponse'
db.delete_table('Recruitment_appresponse')
# Deleting model 'StandigsRequirement'
db.delete_table('Recruitment_standigsrequirement')
models = {
'Recruitment.action': {
'Meta': {'object_name': 'Action'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'Recruitment.appaction': {
'Meta': {'object_name': 'AppAction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'to': "orm['Recruitment.Action']"}),
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['Recruitment.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
'Recruitment.application': {
'Meta': {'object_name': 'Application'},
'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'application'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['auth.User']"}),
'closetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'disposition': ('django.db.models.fields.IntegerField', [], {}),
'intelclear': ('django.db.models.fields.DateTimeField', [], {}),
'interests': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Recruitment.Interest']", 'symmetrical': 'False'}),
'killboard': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'standingsclear': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
'Recruitment.appquestion': {
'Meta': {'object_name': 'AppQuestion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'Recruitment.appresponse': {
'Meta': {'object_name': 'AppResponse'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['Recruitment.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['Recruitment.AppQuestion']"}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'Recruitment.appvote': {
'Meta': {'object_name': 'AppVote'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['Recruitment.Application']"}),
'disposition': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'appvotes'", 'to': "orm['auth.User']"})
},
'Recruitment.interest': {
'Meta': {'object_name': 'Interest'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'Recruitment.interview': {
'Meta': {'object_name': 'Interview'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interviews'", 'to': "orm['Recruitment.Application']"}),
'chatlog': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interviews'", 'to': "orm['auth.User']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
'Recruitment.standigsrequirement': {
'Meta': {'object_name': 'StandigsRequirement'},
'entity': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'entitytype': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Recruitment']
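# Illustrative note, not part of the original migration: with South installed,
# this initial migration would typically be applied with the management
# command, e.g.:
#
#     $ python manage.py migrate Recruitment 0001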
|
gpl-3.0
|
k0ste/ansible
|
lib/ansible/executor/powershell/module_manifest.py
|
4
|
16789
|
# (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import errno
import json
import os
import pkgutil
import random
import re
from distutils.version import LooseVersion
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.compat.importlib import import_module
from ansible.plugins.loader import ps_module_utils_loader
from ansible.utils.collection_loader import resource_from_fqcr
class PSModuleDepFinder(object):
def __init__(self):
# This is also used by validate-modules to get a module's required utils in base and a collection.
self.ps_modules = dict()
self.exec_scripts = dict()
# by defining an explicit dict of cs utils and where they are used, we
# can potentially save time by not adding the type multiple times if it
# isn't needed
self.cs_utils_wrapper = dict()
self.cs_utils_module = dict()
self.ps_version = None
self.os_version = None
self.become = False
self._re_cs_module = [
# Reference C# module_util in another C# util, this must always be the fully qualified name.
# 'using ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
re.compile(to_bytes(r'(?i)^using\s((Ansible\..+)|'
r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+));\s*$')),
]
self._re_cs_in_ps_module = [
# Reference C# module_util in a PowerShell module
# '#AnsibleRequires -CSharpUtil Ansible.{name}'
# '#AnsibleRequires -CSharpUtil ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
# '#AnsibleRequires -CSharpUtil ..module_utils.{name}'
re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((Ansible\..+)|'
r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
r'(\.[\w\.]+))')),
]
self._re_ps_module = [
# Original way of referencing a builtin module_util
# '#Requires -Module Ansible.ModuleUtils.{name}
re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)')),
# New way of referencing a builtin and collection module_util
# '#AnsibleRequires -PowerShell Ansible.ModuleUtils.{name}'
# '#AnsibleRequires -PowerShell ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
# '#AnsibleRequires -PowerShell ..module_utils.{name}'
re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-powershell\s+((Ansible\.ModuleUtils\..+)|'
r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
r'(\.[\w\.]+))')),
]
self._re_wrapper = re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-wrapper\s+(\w*)'))
self._re_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))
def scan_module(self, module_data, fqn=None, wrapper=False, powershell=True):
lines = module_data.split(b'\n')
module_utils = set()
if wrapper:
cs_utils = self.cs_utils_wrapper
else:
cs_utils = self.cs_utils_module
if powershell:
checks = [
# PS module contains '#Requires -Module Ansible.ModuleUtils.*'
# PS module contains '#AnsibleRequires -Powershell Ansible.*' (or collections module_utils ref)
(self._re_ps_module, self.ps_modules, ".psm1"),
# PS module contains '#AnsibleRequires -CSharpUtil Ansible.*' (or collections module_utils ref)
(self._re_cs_in_ps_module, cs_utils, ".cs"),
]
else:
checks = [
# CS module contains 'using Ansible.*;' or 'using ansible_collections.ns.coll.plugins.module_utils.*;'
(self._re_cs_module, cs_utils, ".cs"),
]
for line in lines:
for check in checks:
for pattern in check[0]:
match = pattern.match(line)
if match:
# tolerate windows line endings by stripping any remaining
# newline chars
module_util_name = to_text(match.group(1).rstrip())
if module_util_name not in check[1].keys():
module_utils.add((module_util_name, check[2], fqn))
break
if powershell:
ps_version_match = self._re_ps_version.match(line)
if ps_version_match:
self._parse_version_match(ps_version_match, "ps_version")
os_version_match = self._re_os_version.match(line)
if os_version_match:
self._parse_version_match(os_version_match, "os_version")
# once become is set, no need to keep on checking recursively
if not self.become:
become_match = self._re_become.match(line)
if become_match:
self.become = True
if wrapper:
wrapper_match = self._re_wrapper.match(line)
if wrapper_match:
self.scan_exec_script(wrapper_match.group(1).rstrip())
# recursively drill into each Requires to see if there are any more
# requirements
for m in set(module_utils):
self._add_module(m, wrapper=wrapper)
def scan_exec_script(self, name):
# scans lib/ansible/executor/powershell for scripts used in the module
# exec side. It also scans these scripts for any dependencies
name = to_text(name)
if name in self.exec_scripts.keys():
return
data = pkgutil.get_data("ansible.executor.powershell", to_native(name + ".ps1"))
if data is None:
raise AnsibleError("Could not find executor powershell script "
"for '%s'" % name)
b_data = to_bytes(data)
# remove comments to reduce the payload size in the exec wrappers
if C.DEFAULT_DEBUG:
exec_script = b_data
else:
exec_script = _strip_comments(b_data)
self.exec_scripts[name] = to_bytes(exec_script)
self.scan_module(b_data, wrapper=True, powershell=True)
def _add_module(self, name, wrapper=False):
m, ext, fqn = name
m = to_text(m)
util_fqn = None
if m.startswith("Ansible."):
# Builtin util, use plugin loader to get the data
mu_path = ps_module_utils_loader.find_plugin(m, ext)
if not mu_path:
raise AnsibleError('Could not find imported module support code '
'for \'%s\'' % m)
module_util_data = to_bytes(_slurp(mu_path))
else:
# Collection util, load the package data based on the util import.
submodules = m.split(".")
if m.startswith('.'):
fqn_submodules = fqn.split('.')
for submodule in submodules:
if submodule:
break
del fqn_submodules[-1]
submodules = fqn_submodules + [s for s in submodules if s]
n_package_name = to_native('.'.join(submodules[:-1]), errors='surrogate_or_strict')
n_resource_name = to_native(submodules[-1] + ext, errors='surrogate_or_strict')
try:
module_util = import_module(n_package_name)
module_util_data = to_bytes(pkgutil.get_data(n_package_name, n_resource_name),
errors='surrogate_or_strict')
util_fqn = to_text("%s.%s " % (n_package_name, submodules[-1]), errors='surrogate_or_strict')
# Get the path of the util which is required for coverage collection.
resource_paths = list(module_util.__path__)
if len(resource_paths) != 1:
# This should never happen with a collection but we are just being defensive about it.
raise AnsibleError("Internal error: Referenced module_util package '%s' contains 0 or multiple "
"import locations when we only expect 1." % n_package_name)
mu_path = os.path.join(resource_paths[0], n_resource_name)
except OSError as err:
if err.errno == errno.ENOENT:
raise AnsibleError('Could not find collection imported module support code for \'%s\''
% to_native(m))
else:
raise
util_info = {
'data': module_util_data,
'path': to_text(mu_path),
}
if ext == ".psm1":
self.ps_modules[m] = util_info
else:
if wrapper:
self.cs_utils_wrapper[m] = util_info
else:
self.cs_utils_module[m] = util_info
self.scan_module(module_util_data, fqn=util_fqn, wrapper=wrapper, powershell=(ext == ".psm1"))
def _parse_version_match(self, match, attribute):
new_version = to_text(match.group(1)).rstrip()
        # PowerShell cannot cast a string like "1" to Version; it needs at
        # least major.minor to be valid, so append ".0" when only a major
        # version was given.
if match.group(2) is None:
new_version = "%s.0" % new_version
existing_version = getattr(self, attribute, None)
if existing_version is None:
setattr(self, attribute, new_version)
else:
# determine which is the latest version and set that
if LooseVersion(new_version) > LooseVersion(existing_version):
setattr(self, attribute, new_version)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s"
% os.path.abspath(path))
    with open(path, 'rb') as fd:
        data = fd.read()
    return data
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
start_block = False
for line in source.splitlines():
l = line.strip()
if start_block and l.endswith(b'#>'):
start_block = False
continue
elif start_block:
continue
elif l.startswith(b'<#'):
start_block = True
continue
elif not l or l.startswith(b'#'):
continue
buf.append(line)
return b'\n'.join(buf)
def _create_powershell_wrapper(b_module_data, module_path, module_args,
environment, async_timeout, become,
become_method, become_user, become_password,
become_flags, substyle, task_vars, module_fqn):
# creates the manifest/wrapper used in PowerShell/C# modules to enable
# things like become and async - this is also called in action/script.py
# FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
# if running under a persistent connection and substyle is C# so we
# don't have type conflicts
finder = PSModuleDepFinder()
if substyle != 'script':
# don't scan the module for util dependencies and other Ansible related
# flags if the substyle is 'script' which is set by action/script
finder.scan_module(b_module_data, fqn=module_fqn, powershell=(substyle == "powershell"))
module_wrapper = "module_%s_wrapper" % substyle
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
csharp_utils=dict(),
csharp_utils_module=list(), # csharp_utils only required by a module
module_args=module_args,
actions=[module_wrapper],
environment=environment,
encoded_output=False,
)
finder.scan_exec_script(module_wrapper)
if async_timeout > 0:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('async_watchdog')
finder.scan_exec_script('async_wrapper')
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
exec_manifest["async_timeout_sec"] = async_timeout
exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
if become and resource_from_fqcr(become_method) == 'runas': # runas and namespace.collection.runas
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest["actions"].insert(0, 'become_wrapper')
exec_manifest["become_user"] = become_user
exec_manifest["become_password"] = become_password
exec_manifest['become_flags'] = become_flags
exec_manifest['min_ps_version'] = finder.ps_version
exec_manifest['min_os_version'] = finder.os_version
if finder.become and 'become_wrapper' not in exec_manifest['actions']:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest['actions'].insert(0, 'become_wrapper')
exec_manifest['become_user'] = 'SYSTEM'
exec_manifest['become_password'] = None
exec_manifest['become_flags'] = None
coverage_manifest = dict(
module_path=module_path,
module_util_paths=dict(),
output=None,
)
coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
if coverage_output and substyle == 'powershell':
finder.scan_exec_script('coverage_wrapper')
coverage_manifest['output'] = coverage_output
coverage_whitelist = C.config.get_config_value('COVERAGE_REMOTE_WHITELIST', variables=task_vars)
coverage_manifest['whitelist'] = coverage_whitelist
# make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None),
wrapper=False)
    # exec_wrapper only needs to be part of the payload when become or async
    # is used. To save payload space, record whether it was already present
    # before this scan and drop it again afterwards if it was not required.
exec_required = "exec_wrapper" in finder.exec_scripts.keys()
finder.scan_exec_script("exec_wrapper")
# must contain an empty newline so it runs the begin/process/end block
finder.exec_scripts["exec_wrapper"] += b"\n\n"
exec_wrapper = finder.exec_scripts["exec_wrapper"]
if not exec_required:
finder.exec_scripts.pop("exec_wrapper")
for name, data in finder.exec_scripts.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest[name] = b64_data
for name, data in finder.ps_modules.items():
b64_data = to_text(base64.b64encode(data['data']))
exec_manifest['powershell_modules'][name] = b64_data
coverage_manifest['module_util_paths'][name] = data['path']
cs_utils = {}
for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
for name, data in cs_util.items():
cs_utils[name] = data['data']
for name, data in cs_utils.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest['csharp_utils'][name] = b64_data
exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
# To save on the data we are sending across we only add the coverage info if coverage is being run
if 'coverage_wrapper' in exec_manifest:
exec_manifest['coverage'] = coverage_manifest
b_json = to_bytes(json.dumps(exec_manifest))
# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
b_data = exec_wrapper + b'\0\0\0\0' + b_json
return b_data
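# Illustrative sketch, not part of the original module: feeding a small
# PowerShell module body through PSModuleDepFinder.scan_module() to collect
# its declared requirements. The module text below is made up for the example,
# and resolving the named utils requires a normal Ansible installation.
#
#     finder = PSModuleDepFinder()
#     b_module = b"\n".join([
#         b'#AnsibleRequires -CSharpUtil Ansible.Basic',
#         b'#Requires -Module Ansible.ModuleUtils.Legacy',
#         b'#Requires -Version 5.1',
#     ])
#     finder.scan_module(b_module, powershell=True)
#     # finder.ps_modules / finder.cs_utils_module now hold the resolved utils;
#     # finder.ps_version is '5.1'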
|
gpl-3.0
|
gsehub/edx-platform
|
openedx/core/djangoapps/content/block_structure/tests/test_models.py
|
13
|
7435
|
"""
Unit tests for Block Structure models.
"""
# pylint: disable=protected-access
import ddt
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.test import TestCase
from django.utils.timezone import now
from itertools import product
from mock import patch, Mock
from uuid import uuid4
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from ..exceptions import BlockStructureNotFound
from ..models import BlockStructureModel, _directory_name, _storage_error_handling
@ddt.ddt
class BlockStructureModelTestCase(TestCase):
"""
Tests for BlockStructureModel.
"""
def setUp(self):
super(BlockStructureModelTestCase, self).setUp()
self.course_key = CourseLocator('org', 'course', unicode(uuid4()))
self.usage_key = BlockUsageLocator(course_key=self.course_key, block_type='course', block_id='course')
self.params = self._create_bsm_params()
def tearDown(self):
BlockStructureModel._prune_files(self.usage_key, num_to_keep=0)
super(BlockStructureModelTestCase, self).tearDown()
def _assert_bsm_fields(self, bsm, expected_serialized_data):
"""
Verifies that the field values and serialized data
on the given bsm are as expected.
"""
for field_name, field_value in self.params.iteritems():
self.assertEqual(field_value, getattr(bsm, field_name))
self.assertEqual(bsm.get_serialized_data(), expected_serialized_data)
self.assertIn(_directory_name(self.usage_key), bsm.data.name)
def _assert_file_count_equal(self, expected_count):
"""
Asserts the number of files for self.usage_key
is as expected.
"""
self.assertEqual(len(BlockStructureModel._get_all_files(self.usage_key)), expected_count)
def _create_bsm_params(self):
"""
Returns the parameters for creating a BlockStructureModel.
"""
return dict(
data_usage_key=self.usage_key,
data_version='DV',
data_edit_timestamp=now(),
transformers_schema_version='TV',
block_structure_schema_version=unicode(1),
)
def _verify_update_or_create_call(self, serialized_data, mock_log=None, expect_created=None):
"""
Calls BlockStructureModel.update_or_create
and verifies the response.
"""
bsm, created = BlockStructureModel.update_or_create(serialized_data, **self.params)
if mock_log:
self.assertEqual("Created" if expect_created else "Updated", mock_log.info.call_args[0][1])
self.assertEqual(len(serialized_data), mock_log.info.call_args[0][6])
self._assert_bsm_fields(bsm, serialized_data)
if expect_created is not None:
self.assertEqual(created, expect_created)
return bsm
@patch('openedx.core.djangoapps.content.block_structure.models.log')
@patch.dict(settings.BLOCK_STRUCTURES_SETTINGS, {'PRUNING_ACTIVE': False})
def test_update_or_create(self, mock_log):
serialized_data = 'initial data'
# shouldn't already exist
with self.assertRaises(BlockStructureNotFound):
BlockStructureModel.get(self.usage_key)
self.assertIn("BlockStructure: Not found in table;", mock_log.info.call_args[0][0])
# create an entry
bsm = self._verify_update_or_create_call(serialized_data, mock_log, expect_created=True)
# get entry
found_bsm = BlockStructureModel.get(self.usage_key)
self._assert_bsm_fields(found_bsm, serialized_data)
self.assertIn("Read", mock_log.info.call_args[0][1])
# update entry
self.params.update(dict(data_version='new version'))
updated_serialized_data = 'updated data'
updated_bsm = self._verify_update_or_create_call(updated_serialized_data, mock_log, expect_created=False)
self.assertNotEqual(bsm.data.name, updated_bsm.data.name)
# old files not pruned
self._assert_file_count_equal(2)
@patch('openedx.core.djangoapps.content.block_structure.config.num_versions_to_keep', Mock(return_value=1))
def test_prune_files(self):
self._verify_update_or_create_call('test data', expect_created=True)
self._verify_update_or_create_call('updated data', expect_created=False)
self._assert_file_count_equal(1)
@patch('openedx.core.djangoapps.content.block_structure.config.num_versions_to_keep', Mock(return_value=1))
@patch('openedx.core.djangoapps.content.block_structure.models.BlockStructureModel._delete_files')
@patch('openedx.core.djangoapps.content.block_structure.models.log')
def test_prune_exception(self, mock_log, mock_delete):
mock_delete.side_effect = Exception
self._verify_update_or_create_call('test data', expect_created=True)
self._verify_update_or_create_call('updated data', expect_created=False)
self.assertIn('BlockStructure: Exception when deleting old files', mock_log.exception.call_args[0][0])
self._assert_file_count_equal(2) # old files not pruned
@ddt.data(
*product(
range(1, 3), # prune_keep_count
range(4), # num_prior_edits
)
)
@ddt.unpack
def test_prune_keep_count(self, prune_keep_count, num_prior_edits):
with patch(
'openedx.core.djangoapps.content.block_structure.config.num_versions_to_keep',
return_value=prune_keep_count,
):
for x in range(num_prior_edits):
self._verify_update_or_create_call('data_{}'.format(x))
if num_prior_edits:
self._assert_file_count_equal(min(num_prior_edits, prune_keep_count))
self._verify_update_or_create_call('data_final')
self._assert_file_count_equal(min(num_prior_edits + 1, prune_keep_count))
@ddt.data(
(IOError, BlockStructureNotFound, True),
(IOError, IOError, False),
(SuspiciousOperation, BlockStructureNotFound, True),
(SuspiciousOperation, SuspiciousOperation, False),
(OSError, OSError, True),
(OSError, OSError, False),
)
@ddt.unpack
def test_error_handling(self, error_raised_in_operation, expected_error_raised, is_read_operation):
bs_model, _ = BlockStructureModel.update_or_create('test data', **self.params)
with self.assertRaises(expected_error_raised):
with _storage_error_handling(bs_model, 'operation', is_read_operation):
raise error_raised_in_operation
@patch('openedx.core.djangoapps.content.block_structure.models.log')
def test_old_mongo_keys(self, mock_log):
self.course_key = CourseLocator('org2', 'course2', unicode(uuid4()), deprecated=True)
self.usage_key = BlockUsageLocator(course_key=self.course_key, block_type='course', block_id='course')
serialized_data = 'test data for old course'
self.params['data_usage_key'] = self.usage_key
with patch('xmodule.modulestore.mixed.MixedModuleStore.fill_in_run') as mock_fill_in_run:
mock_fill_in_run.return_value = self.usage_key.course_key
self._verify_update_or_create_call(serialized_data, mock_log, expect_created=True)
found_bsm = BlockStructureModel.get(self.usage_key)
self._assert_bsm_fields(found_bsm, serialized_data)
|
agpl-3.0
|