repo_name (string, len 5-100) | path (string, len 4-299) | copies (string, 990 classes) | size (string, len 4-7) | content (string, len 666-1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
fanwenl/kindle-image | alien_invasion/setting.py | 1 | 1456 | """ This file holds the game's settings. """
class Settings():
""" 存储游戏的所有设置的类 """
def __init__(self):
""" 初始化游戏的设置 """
self.screen_width = 1920
self.screen_height = 900
self.bg_color = (230, 230, 230)
        # Ship settings
self.ship_limit = 3
        # Bullet settings
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = (60, 60, 60)
self.bullets_allowed = 5
        # Alien settings
self.fleet_drop_speed = 10
        # How quickly the game speeds up
self.speedup_scale = 1.1
        # How quickly the alien point values increase
self.score_scale = 1.5
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
""" 初始化随游戏进行而改变的设置 """
self.ship_speed_factor = 1.5
self.bullet_speed_factor = 1
self.alien_speed_factor = 1
        # A fleet_direction of 1 represents moving right; -1 represents left
self.fleet_direction = 1
        # Scoring
self.alien_points = 50
def increase_speed(self):
""" 提高速度设置 """
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
        # Increase the point value for each alien kill
self.alien_points = int(self.alien_points * self.score_scale) | apache-2.0 | 617,501,777,982,876,500 | 23.78 | 69 | 0.548465 | false |
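The class splits fixed options from the dynamic values reset by initialize_dynamic_settings(). A minimal usage sketch of that pattern, assuming the Settings class above is in scope (the three-iteration loop is illustrative only, not part of the game):

settings = Settings()
assert settings.ship_speed_factor == 1.5
# Each cleared fleet speeds the game up and raises the alien bounty.
for _ in range(3):
    settings.increase_speed()
print(settings.ship_speed_factor)  # 1.5 * 1.1**3, about 1.996
print(settings.alien_points)       # 50 -> 75 -> 112 -> 168
# Starting a new game resets only the dynamic values; screen size persists.
settings.initialize_dynamic_settings()
assert settings.alien_points == 50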
tima/ansible | lib/ansible/modules/network/nxos/nxos_system.py | 31 | 11749 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_system
extends_documentation_fragment: nxos
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco NXOS devices
description:
- This module provides declarative management of node system attributes
on Cisco NXOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configures the default domain
name suffix to be used when referencing this node by its
FQDN. This argument accepts either a list of domain names or
a list of dicts that configure the domain name and VRF name. See
examples.
domain_lookup:
description:
- Enables or disables the DNS
lookup feature in Cisco NXOS. This argument accepts boolean
values. When enabled, the system will try to resolve hostnames
using DNS and when disabled, hostnames will not be resolved.
domain_search:
description:
- Configures a list of domain
name suffixes to search when performing DNS name resolution.
This argument accepts either a list of domain names or
a list of dicts that configure the domain name and VRF name. See
examples.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers or
a list of hashes that configure the name server and VRF name. See
examples.
system_mtu:
description:
- Specifies the mtu, must be an integer.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
nxos_system:
hostname: nxos01
domain_name: test.example.com
- name: remove configuration
nxos_system:
state: absent
- name: configure name servers
nxos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
- name: configure name servers with VRF support
nxos_system:
name_servers:
- { server: 8.8.8.8, vrf: mgmt }
- { server: 8.8.4.4, vrf: mgmt }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname nxos01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import ComplexList
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
return vrf in _CONFIGURED_VRFS
config = get_config(module)
_CONFIGURED_VRFS = re.findall(r'vrf context (\S+)', config)
return vrf in _CONFIGURED_VRFS
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
def needs_update(x):
return want.get(x) and (want.get(x) != have.get(x))
def difference(x, y, z):
return [item for item in x[z] if item not in y[z]]
def remove(cmd, commands, vrf=None):
if vrf:
commands.append('vrf context %s' % vrf)
commands.append(cmd)
if vrf:
commands.append('exit')
def add(cmd, commands, vrf=None):
if vrf:
if not has_vrf(module, vrf):
module.fail_json(msg='invalid vrf name %s' % vrf)
return remove(cmd, commands, vrf)
if state == 'absent':
if have['hostname']:
commands.append('no hostname')
for item in have['domain_name']:
cmd = 'no ip domain-name %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in have['domain_search']:
cmd = 'no ip domain-list %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in have['name_servers']:
cmd = 'no ip name-server %s' % item['server']
remove(cmd, commands, item['vrf'])
if have['system_mtu']:
commands.append('no system jumbomtu')
if state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_lookup'):
cmd = 'ip domain-lookup'
if want['domain_lookup'] is False:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['domain_name']:
for item in difference(have, want, 'domain_name'):
cmd = 'no ip domain-name %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'domain_name'):
cmd = 'ip domain-name %s' % item['name']
add(cmd, commands, item['vrf'])
if want['domain_search']:
for item in difference(have, want, 'domain_search'):
cmd = 'no ip domain-list %s' % item['name']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'domain_search'):
cmd = 'ip domain-list %s' % item['name']
add(cmd, commands, item['vrf'])
if want['name_servers']:
for item in difference(have, want, 'name_servers'):
cmd = 'no ip name-server %s' % item['server']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'name_servers'):
cmd = 'ip name-server %s' % item['server']
add(cmd, commands, item['vrf'])
if needs_update('system_mtu'):
commands.append('system jumbomtu %s' % want['system_mtu'])
return commands
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
if match:
return match.group(1)
def parse_domain_name(config, vrf_config):
objects = list()
    regex = re.compile(r'ip domain-name (\S+)', re.M)
    # note: a compiled pattern's search() takes a start position, not flags,
    # as its second argument, so the flag must be supplied at compile time
    match = regex.search(config)
if match:
objects.append({'name': match.group(1), 'vrf': None})
for vrf, cfg in iteritems(vrf_config):
        match = regex.search(cfg)
if match:
objects.append({'name': match.group(1), 'vrf': vrf})
return objects
def parse_domain_search(config, vrf_config):
objects = list()
for item in re.findall(r'^ip domain-list (\S+)', config, re.M):
objects.append({'name': item, 'vrf': None})
for vrf, cfg in iteritems(vrf_config):
for item in re.findall(r'ip domain-list (\S+)', cfg, re.M):
objects.append({'name': item, 'vrf': vrf})
return objects
def parse_name_servers(config, vrf_config, vrfs):
objects = list()
match = re.search('^ip name-server (.+)$', config, re.M)
if match:
for addr in match.group(1).split(' '):
if addr == 'use-vrf' or addr in vrfs:
continue
objects.append({'server': addr, 'vrf': None})
for vrf, cfg in iteritems(vrf_config):
vrf_match = re.search('ip name-server (.+)', cfg, re.M)
if vrf_match:
for addr in vrf_match.group(1).split(' '):
objects.append({'server': addr, 'vrf': vrf})
return objects
def parse_system_mtu(config):
match = re.search(r'^system jumbomtu (\d+)', config, re.M)
if match:
return int(match.group(1))
def map_config_to_obj(module):
config = get_config(module)
configobj = NetworkConfig(indent=2, contents=config)
vrf_config = {}
vrfs = re.findall(r'^vrf context (\S+)$', config, re.M)
for vrf in vrfs:
config_data = configobj.get_block_config(path=['vrf context %s' % vrf])
vrf_config[vrf] = config_data
return {
'hostname': parse_hostname(config),
'domain_lookup': 'no ip domain-lookup' not in config,
'domain_name': parse_domain_name(config, vrf_config),
'domain_search': parse_domain_search(config, vrf_config),
'name_servers': parse_name_servers(config, vrf_config, vrfs),
'system_mtu': parse_system_mtu(config)
}
def validate_system_mtu(value, module):
if not 1500 <= value <= 9216:
module.fail_json(msg='system_mtu must be between 1500 and 9216')
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
'domain_lookup': module.params['domain_lookup'],
'system_mtu': module.params['system_mtu']
}
domain_name = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
domain_search = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
name_servers = ComplexList(dict(
server=dict(key=True),
vrf=dict()
), module)
for arg, cast in [('domain_name', domain_name), ('domain_search', domain_search),
('name_servers', name_servers)]:
if module.params[arg] is not None:
obj[arg] = cast(module.params[arg])
else:
obj[arg] = None
return obj
def main():
""" main entry point for module execution
"""
argument_spec = dict(
hostname=dict(),
domain_lookup=dict(type='bool'),
# { name: <str>, vrf: <str> }
domain_name=dict(type='list'),
# {name: <str>, vrf: <str> }
domain_search=dict(type='list'),
# { server: <str>; vrf: <str> }
name_servers=dict(type='list'),
system_mtu=dict(type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,359,565,885,013,717,500 | 30 | 89 | 0.61086 | false |
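The heart of this module is a declarative diff between the parsed running config (have) and the task parameters (want); only the commands needed to converge are emitted. A standalone sketch of that diff step, reusing the difference() helper from map_obj_to_commands with made-up data:

def difference(x, y, z):
    return [item for item in x[z] if item not in y[z]]

want = {'name_servers': [{'server': '8.8.8.8', 'vrf': None},
                         {'server': '1.1.1.1', 'vrf': None}]}
have = {'name_servers': [{'server': '8.8.8.8', 'vrf': None},
                         {'server': '9.9.9.9', 'vrf': None}]}
commands = []
for item in difference(have, want, 'name_servers'):  # configured but unwanted
    commands.append('no ip name-server %s' % item['server'])
for item in difference(want, have, 'name_servers'):  # wanted but missing
    commands.append('ip name-server %s' % item['server'])
print(commands)  # ['no ip name-server 9.9.9.9', 'ip name-server 1.1.1.1']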
40223245/2015cdb_g6-team1 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_program.py | 738 | 10833 | import io
import os
import sys
import unittest
# 'hasInstallHandler' is referenced in testBufferCatchFailfast below but is
# not defined anywhere in this copy of the file; define it here so the guard
# can evaluate (unittest imports its signals submodule on package import).
hasInstallHandler = hasattr(unittest.signals, 'installHandler')
class Test_TestProgram(unittest.TestCase):
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
# Horrible white box test
def testNoExit(self):
result = object()
test = object()
class FakeRunner(object):
def run(self, test):
self.test = test
return result
runner = FakeRunner()
oldParseArgs = unittest.TestProgram.parseArgs
def restoreParseArgs():
unittest.TestProgram.parseArgs = oldParseArgs
unittest.TestProgram.parseArgs = lambda *args: None
self.addCleanup(restoreParseArgs)
def removeTest():
del unittest.TestProgram.test
unittest.TestProgram.test = test
self.addCleanup(removeTest)
program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2)
self.assertEqual(program.result, result)
self.assertEqual(runner.test, test)
self.assertEqual(program.verbosity, 2)
class FooBar(unittest.TestCase):
def testPass(self):
assert True
def testFail(self):
assert False
class FooBarLoader(unittest.TestLoader):
"""Test loader that returns a suite containing FooBar."""
def loadTestsFromModule(self, module):
return self.suiteClass(
[self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
def test_NonExit(self):
program = unittest.main(exit=False,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=io.StringIO()),
testLoader=self.FooBarLoader())
self.assertTrue(hasattr(program, 'result'))
def test_Exit(self):
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=io.StringIO()),
exit=True,
testLoader=self.FooBarLoader())
def test_ExitAsDefault(self):
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=io.StringIO()),
testLoader=self.FooBarLoader())
class InitialisableProgram(unittest.TestProgram):
exit = False
result = None
verbosity = 1
defaultTest = None
testRunner = None
testLoader = unittest.defaultTestLoader
module = '__main__'
progName = 'test'
test = 'test'
def __init__(self, *args):
pass
RESULT = object()
class FakeRunner(object):
initArgs = None
test = None
raiseError = False
def __init__(self, **kwargs):
FakeRunner.initArgs = kwargs
if FakeRunner.raiseError:
FakeRunner.raiseError = False
raise TypeError
def run(self, test):
FakeRunner.test = test
return RESULT
class TestCommandLineArgs(unittest.TestCase):
def setUp(self):
self.program = InitialisableProgram()
self.program.createTests = lambda: None
FakeRunner.initArgs = None
FakeRunner.test = None
FakeRunner.raiseError = False
def testVerbosity(self):
program = self.program
for opt in '-q', '--quiet':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 0)
for opt in '-v', '--verbose':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 2)
def testBufferCatchFailfast(self):
program = self.program
for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
('catch', 'catchbreak')):
if attr == 'catch' and not hasInstallHandler:
continue
short_opt = '-%s' % arg[0]
long_opt = '--%s' % arg
for opt in short_opt, long_opt:
setattr(program, attr, None)
program.parseArgs([None, opt])
self.assertTrue(getattr(program, attr))
for opt in short_opt, long_opt:
not_none = object()
setattr(program, attr, not_none)
program.parseArgs([None, opt])
self.assertEqual(getattr(program, attr), not_none)
def testWarning(self):
"""Test the warnings argument"""
# see #10535
class FakeTP(unittest.TestProgram):
def parseArgs(self, *args, **kw): pass
def runTests(self, *args, **kw): pass
warnoptions = sys.warnoptions[:]
try:
sys.warnoptions[:] = []
# no warn options, no arg -> default
self.assertEqual(FakeTP().warnings, 'default')
# no warn options, w/ arg -> arg value
self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
sys.warnoptions[:] = ['somevalue']
# warn options, no arg -> None
# warn options, w/ arg -> arg value
self.assertEqual(FakeTP().warnings, None)
self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
finally:
sys.warnoptions[:] = warnoptions
def testRunTestsRunnerClass(self):
program = self.program
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.warnings = 'warnings'
program.runTests()
self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
'failfast': 'failfast',
'buffer': 'buffer',
'warnings': 'warnings'})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsRunnerInstance(self):
program = self.program
program.testRunner = FakeRunner()
FakeRunner.initArgs = None
program.runTests()
# A new FakeRunner should not have been instantiated
self.assertIsNone(FakeRunner.initArgs)
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsOldRunnerClass(self):
program = self.program
FakeRunner.raiseError = True
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.test = 'test'
program.runTests()
# If initialising raises a type error it should be retried
# without the new keyword arguments
self.assertEqual(FakeRunner.initArgs, {})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testCatchBreakInstallsHandler(self):
module = sys.modules['unittest.main']
original = module.installHandler
def restore():
module.installHandler = original
self.addCleanup(restore)
self.installed = False
def fakeInstallHandler():
self.installed = True
module.installHandler = fakeInstallHandler
program = self.program
program.catchbreak = True
program.testRunner = FakeRunner
program.runTests()
self.assertTrue(self.installed)
def _patch_isfile(self, names, exists=True):
def isfile(path):
return path in names
original = os.path.isfile
os.path.isfile = isfile
def restore():
os.path.isfile = original
self.addCleanup(restore)
def testParseArgsFileNames(self):
# running tests with filenames instead of module names
program = self.program
argv = ['progname', 'foo.py', 'bar.Py', 'baz.PY', 'wing.txt']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
# note that 'wing.txt' is not a Python file so the name should
# *not* be converted to a module name
expected = ['foo', 'bar', 'baz', 'wing.txt']
self.assertEqual(program.testNames, expected)
def testParseArgsFilePaths(self):
program = self.program
argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
expected = ['foo.bar.baz', 'green.red']
self.assertEqual(program.testNames, expected)
def testParseArgsNonExistentFiles(self):
program = self.program
argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
self._patch_isfile([])
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNames, argv[1:])
def testParseArgsAbsolutePathsThatCanBeConverted(self):
cur_dir = os.getcwd()
program = self.program
def _join(name):
return os.path.join(cur_dir, name)
argv = ['progname', _join('foo/bar/baz.py'), _join('green\\red.py')]
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
expected = ['foo.bar.baz', 'green.red']
self.assertEqual(program.testNames, expected)
def testParseArgsAbsolutePathsThatCannotBeConverted(self):
program = self.program
# even on Windows '/...' is considered absolute by os.path.abspath
argv = ['progname', '/foo/bar/baz.py', '/green/red.py']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNames, argv[1:])
# it may be better to use platform specific functions to normalise paths
# rather than accepting '.PY' and '\' as file separator on Linux / Mac
# it would also be better to check that a filename is a valid module
# identifier (we have a regex for this in loader.py)
# for invalid filenames should we raise a useful error rather than
# leaving the current error message (import of filename fails) in place?
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 8,875,189,089,757,175,000 | 30.768328 | 89 | 0.592357 | false |
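FakeRunner above is the file's key fixture: a stand-in runner whose class attributes record the constructor kwargs and the test it was asked to run, so TestProgram's wiring can be asserted without executing anything real. A self-contained sketch of the same recording-stub idea (names are illustrative):

import unittest
RESULT = object()
class RecordingRunner(object):
    """Stand-in runner that records how the framework drives it."""
    init_args = None
    ran = None
    def __init__(self, **kwargs):
        RecordingRunner.init_args = kwargs
    def run(self, test):
        RecordingRunner.ran = test
        return RESULT
class Sample(unittest.TestCase):
    def test_pass(self):
        self.assertTrue(True)
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Sample)
result = RecordingRunner(verbosity=2).run(suite)
assert RecordingRunner.init_args == {'verbosity': 2}
assert RecordingRunner.ran is suite and result is RESULT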
henridwyer/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause | -5,532,135,736,919,476,000 | 37.70229 | 79 | 0.68856 | false |
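The regularization plotted above is a convex blend of the empirical covariance with a scaled identity: shrunk = (1 - alpha) * emp_cov + alpha * (trace(emp_cov)/p) * I. A minimal NumPy sketch of that formula (a hand-rolled approximation of what ShrunkCovariance computes, not a call into scikit-learn):

import numpy as np
rng = np.random.RandomState(42)
X = rng.normal(size=(20, 5))
emp_cov = np.cov(X, rowvar=False, bias=True)   # empirical covariance
def shrunk(emp_cov, alpha=0.1):
    p = emp_cov.shape[0]
    mu = np.trace(emp_cov) / p                 # mean of the eigenvalues
    return (1.0 - alpha) * emp_cov + alpha * mu * np.eye(p)
# Larger alpha pulls the eigenvalues toward their mean, trading bias
# for lower variance.
print(np.round(shrunk(emp_cov, 0.5), 2))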
rue89-tech/edx-analytics-pipeline | edx/analytics/tasks/util/tests/test_opaque_key_util.py | 3 | 5866 | """
Tests for utilities that parse event logs.
"""
from opaque_keys.edx.locator import CourseLocator
import edx.analytics.tasks.util.opaque_key_util as opaque_key_util
from edx.analytics.tasks.tests import unittest
VALID_COURSE_ID = unicode(CourseLocator(org='org', course='course_id', run='course_run'))
VALID_LEGACY_COURSE_ID = "org/course_id/course_run"
INVALID_LEGACY_COURSE_ID = "org:course_id:course_run"
INVALID_NONASCII_LEGACY_COURSE_ID = u"org/course\ufffd_id/course_run"
VALID_NONASCII_LEGACY_COURSE_ID = u"org/cours\u00e9_id/course_run"
class CourseIdTest(unittest.TestCase):
"""
Verify that course_id filtering works correctly.
"""
def test_normal_opaque_course_id(self):
self.assertTrue(opaque_key_util.is_valid_course_id(VALID_COURSE_ID))
def test_normal_legacy_course_id(self):
self.assertTrue(opaque_key_util.is_valid_course_id(VALID_LEGACY_COURSE_ID))
def test_legacy_course_id_without_components(self):
self.assertFalse(opaque_key_util.is_valid_course_id(INVALID_LEGACY_COURSE_ID))
def test_course_id_with_valid_nonascii(self):
self.assertTrue(opaque_key_util.is_valid_course_id(VALID_NONASCII_LEGACY_COURSE_ID))
def test_course_id_with_invalid_nonascii(self):
self.assertFalse(opaque_key_util.is_valid_course_id(INVALID_NONASCII_LEGACY_COURSE_ID))
def test_no_course_id(self):
self.assertFalse(opaque_key_util.is_valid_course_id(None))
def test_valid_org_id(self):
self.assertTrue(opaque_key_util.is_valid_org_id(u'org_id\u00e9'))
def test_invalid_org_id(self):
self.assertFalse(opaque_key_util.is_valid_org_id(u'org\ufffd_id'))
def test_no_org_id(self):
self.assertFalse(opaque_key_util.is_valid_org_id(None))
def test_get_valid_org_id(self):
self.assertEquals(opaque_key_util.get_org_id_for_course(VALID_COURSE_ID), "org")
def test_get_valid_legacy_org_id(self):
self.assertEquals(opaque_key_util.get_org_id_for_course(VALID_LEGACY_COURSE_ID), "org")
self.assertEquals(opaque_key_util.get_org_id_for_course(VALID_NONASCII_LEGACY_COURSE_ID), "org")
def test_get_invalid_legacy_org_id(self):
self.assertIsNone(opaque_key_util.get_org_id_for_course(INVALID_LEGACY_COURSE_ID))
self.assertIsNone(opaque_key_util.get_org_id_for_course(INVALID_NONASCII_LEGACY_COURSE_ID))
def test_get_filename(self):
self.assertEquals(opaque_key_util.get_filename_safe_course_id(VALID_COURSE_ID), "org_course_id_course_run")
self.assertEquals(opaque_key_util.get_filename_safe_course_id(VALID_COURSE_ID, '-'), "org-course_id-course_run")
def test_get_filename_with_colon(self):
course_id = unicode(CourseLocator(org='org', course='course:id', run='course:run'))
self.assertEquals(opaque_key_util.get_filename_safe_course_id(VALID_COURSE_ID), "org_course_id_course_run")
self.assertEquals(opaque_key_util.get_filename_safe_course_id(course_id, '-'), "org-course-id-course-run")
def test_get_filename_for_legacy_id(self):
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_LEGACY_COURSE_ID),
"org_course_id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_LEGACY_COURSE_ID, '-'),
"org-course_id-course_run"
)
def test_get_filename_for_invalid_id(self):
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_LEGACY_COURSE_ID),
"org_course_id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_LEGACY_COURSE_ID, '-'),
"org-course_id-course_run"
)
def test_get_filename_for_nonascii_id(self):
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_NONASCII_LEGACY_COURSE_ID),
u"org_cours__id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(VALID_NONASCII_LEGACY_COURSE_ID, '-'),
u"org-cours-_id-course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_NONASCII_LEGACY_COURSE_ID),
u"org_course__id_course_run"
)
self.assertEquals(
opaque_key_util.get_filename_safe_course_id(INVALID_NONASCII_LEGACY_COURSE_ID, '-'),
u"org-course-_id-course_run"
)
def test_get_course_key_from_url(self):
url = "https://courses.edx.org/courses/{course_id}/stuff".format(course_id=VALID_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertEquals(unicode(course_key), VALID_COURSE_ID)
def test_get_course_key_from_legacy_url(self):
url = "https://courses.edx.org/courses/{course_id}/stuff".format(course_id=VALID_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertEquals(unicode(course_key), VALID_LEGACY_COURSE_ID)
def test_get_course_key_from_invalid_url(self):
url = "https://courses.edx.org/courses/{course_id}/stuff".format(course_id=INVALID_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertIsNone(course_key)
def test_get_course_key_from_nonascii_url(self):
url = u"https://courses.edx.org/courses/{course_id}/stuff".format(course_id=VALID_NONASCII_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertEquals(unicode(course_key), VALID_NONASCII_LEGACY_COURSE_ID)
url = u"https://courses.edx.org/courses/{course_id}/stuff".format(course_id=INVALID_NONASCII_LEGACY_COURSE_ID)
course_key = opaque_key_util.get_course_key_from_url(url)
self.assertIsNone(course_key)
| agpl-3.0 | -2,298,966,936,435,165,400 | 44.123077 | 120 | 0.678316 | false |
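These tests pin down how course ids are flattened into filename-safe slugs. A toy re-implementation of the legacy-format case (not the edx function itself, just enough to show the substitution the legacy tests expect):

import re
def filename_safe(course_id, separator='_'):
    # Collapse every run of non-word characters into the separator.
    return re.sub(r'[^\w]+', separator, course_id, flags=re.UNICODE)
assert filename_safe('org/course_id/course_run') == 'org_course_id_course_run'
assert filename_safe('org/course_id/course_run', '-') == 'org-course_id-course_run'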
poppu-mtg/StackIt | StackIt/GUIapp.py | 1 | 4165 | import os, shutil, sys, time
from . import globals, builder
if sys.version_info.major == 3:
from tkinter import *
else:
from Tkinter import *
# from tkFileDialog import *
from PIL import Image, ImageTk
class ScrollIt():
def __init__(self):
self.image1 = Image.open(mGui.btn2text.get()[9:] + '-scroll.png')
w1, h1 = self.image1.size
self.imagefull = Image.new("RGB", (w1 * 2, h1), "black")
self.imagefull.paste(self.image1, (0, 0))
self.imagefull.paste(self.image1, (w1, 0))
self.photo1 = ImageTk.PhotoImage(self.imagefull)
width1 = self.photo1.width()
height1 = self.photo1.height()
novi1 = Toplevel()
self.canvas1 = Canvas(novi1, width=1980, height=34)
self.canvas1.pack(expand=1, fill=BOTH) # <--- Make your canvas expandable.
x = (width1)/2.0
y = (height1)/2.0
self.item = self.canvas1.create_image(x, y, image=self.photo1) # <--- Save the return value of the create_* method.
self.x00, self.y00 = self.canvas1.coords(self.item)
self.canvas1.bind('<Button-1>', self.next_image)
    def next_image(self, event=None):
x0, y0 = self.canvas1.coords(self.item)
if x0 < 3:
self.canvas1.coords(self.item, (self.x00, y0))
else:
self.canvas1.move(self.item, -3, 0)
self.canvas1.after(60, self.next_image)
def OpenPro1():
    if mGui.Listname.get() != '':
        deckname = mGui.Listname.get()
    elif len(mGui.Listentry.get("1.0", "end-1c")) != 0:
        deckname = 'sample.txt'
    else:
        # nothing to generate: no list file named and no deck pasted
        return
if os.path.isfile(deckname):
os.remove(deckname)
decktext = mGui.Listentry.get("1.0", 'end-1c')
with open(deckname, "a") as outf:
outf.write(decktext + '\n')
builder.main(deckname)
if deckname == 'sample.txt':
if os.path.exists(os.path.join(globals.CACHE_PATH, deckname)):
os.remove(os.path.join(globals.CACHE_PATH, deckname))
shutil.move(deckname, os.path.join(globals.CACHE_PATH, deckname))
novi = Toplevel()
canvas = Canvas(novi, width = 350, height = 1000)
canvas.pack(expand = YES, fill = BOTH)
#gif1 = PhotoImage(file = 'image.gif')
gif1=ImageTk.PhotoImage(Image.open(deckname[:-4] + '.png'))
canvas.create_image(50, 10, image = gif1, anchor = NW)
#assigned the gif1 to the canvas object
canvas.gif1 = gif1
mGui.btn2text.set('BannerIt ' + deckname[:-4])
mGui.Button_2.config(state='active')
def OpenPro2():
ScrollIt()
mGui = Tk()
mGui.configure(background='white')
mGui.title(' StackIt')
mGui.geometry("350x565")
tkimage = ImageTk.PhotoImage(Image.open(os.path.join(globals.RESOURCES_PATH, 'StackIt-Logo.png')).resize((345, 87)))
mGui.Logo = Label(mGui, image=tkimage)
mGui.Logo.grid(row=0, column=0, columnspan=3)
mGui.Label1 = Label(mGui, text=' Decklist:')
mGui.Label1.grid(row=1, column=0)
mGui.Listname = Entry(mGui)
mGui.Listname.grid(row=1, column=1)
mGui.Button_1 = Button(mGui, text="Generate", command=OpenPro1)
mGui.Button_1.grid(row=1, column=2)
#mGui.Listentry=Entry(mGui)
#mGui.Listentry.grid(row=2, column=0, columnspan=3)
mGui.Label2 = Label(mGui, text=' Paste board:')
mGui.Label2.grid(row=2, column=0, columnspan=3)
mGui.Listentry=Text(mGui, height=25, width=40, relief=GROOVE, undo=True, xscrollcommand=True, yscrollcommand=True, bd=2)
mGui.Listentry.grid(row=3, column=0, columnspan=3)
mGui.btn2text = StringVar()
mGui.btn2text.set('BannerIt ')
mGui.Button_2 = Button(mGui, textvariable=mGui.btn2text, state='disabled', command=OpenPro2)
mGui.Button_2.grid(row=4, column=0, columnspan=3)
def main():
if len(sys.argv) > 1 and sys.argv[1] == "--automatedtest":
def draw():
mGui.update_idletasks()
mGui.update()
draw()
mGui.Listentry.insert(END, "60 Island\n4 Urza's Tower\n200 Shadowborn Apostle")
draw()
OpenPro1()
draw()
mGui.Listname.insert(END, "testdecks/StressTest1.dec")
draw()
OpenPro1()
draw()
time.sleep(1)
else:
mGui.mainloop()
if __name__ == "__main__":
main()
| mit | 8,641,570,162,352,748,000 | 30.08209 | 123 | 0.629532 | false |
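ScrollIt's marquee works by pasting the banner image twice side by side, then repeatedly nudging the canvas item left and re-scheduling itself with after(); once the item has scrolled far enough it wraps around, so the doubled image hides the seam. A minimal self-contained version of that loop, using a plain rectangle instead of the banner:

try:
    from tkinter import Tk, Canvas   # Python 3
except ImportError:
    from Tkinter import Tk, Canvas   # Python 2
root = Tk()
canvas = Canvas(root, width=400, height=40, bg='black')
canvas.pack()
item = canvas.create_rectangle(0, 5, 120, 35, fill='white')
def slide():
    if canvas.coords(item)[0] < -120:  # fully off-screen: wrap around
        canvas.move(item, 520, 0)      # 400 px canvas + 120 px shape
    canvas.move(item, -3, 0)
    canvas.after(60, slide)            # re-schedule to keep scrolling
slide()
root.mainloop()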
ofer43211/unisubs | apps/teams/migrations/0102_auto__add_billingreport.py | 5 | 30439 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BillingReport'
db.create_table('teams_billingreport', (
('end_date', self.gf('django.db.models.fields.DateField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('processed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['teams.Team'])),
('start_date', self.gf('django.db.models.fields.DateField')()),
('csv_data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('teams', ['BillingReport'])
def backwards(self, orm):
# Deleting model 'BillingReport'
db.delete_table('teams_billingreport')
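    # For reference, a sketch of the SQL South generates for the
    # create_table call in forwards() above (MySQL dialect; column
    # order and types approximate):
    #
    #   CREATE TABLE `teams_billingreport` (
    #       `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
    #       `start_date` date NOT NULL,
    #       `end_date` date NOT NULL,
    #       `processed` datetime NULL,
    #       `csv_data` longtext NULL,
    #       `team_id` integer NOT NULL REFERENCES `teams_team` (`id`)
    #   );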
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.billingreport': {
'Meta': {'object_name': 'BillingReport'},
'csv_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"})
},
'teams.invite': {
'Meta': {'object_name': 'Invite'},
'approved': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.membershipnarrowing': {
'Meta': {'object_name': 'MembershipNarrowing'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'narrowing_includer'", 'null': 'True', 'to': "orm['teams.TeamMember']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'narrowings'", 'to': "orm['teams.TeamMember']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.setting': {
'Meta': {'unique_together': "(('key', 'team'),)", 'object_name': 'Setting'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'approved': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'review_base_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tasks_based_on'", 'null': 'True', 'to': "orm['videos.SubtitleVersion']"}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tseams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teamlanguagepreference': {
'Meta': {'unique_together': "(('team', 'language_code'),)", 'object_name': 'TeamLanguagePreference'},
'allow_reads': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_writes': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'preferred': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lang_preferences'", 'to': "orm['teams.Team']"})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamnotificationsetting': {
'Meta': {'object_name': 'TeamNotificationSetting'},
'basic_auth_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basic_auth_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'request_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'notification_settings'", 'unique': 'True', 'to': "orm['teams.Team']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'teams.workflow': {
'Meta': {'unique_together': "(('team', 'project', 'team_video'),)", 'object_name': 'Workflow'},
'approve_allowed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'autocreate_subtitle': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'autocreate_translate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'}),
'review_allowed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['teams']
| agpl-3.0 | 1,801,086,215,611,305,000 | 91.801829 | 226 | 0.557508 | false |
BeegorMif/HTPC-Manager | lib/sqlalchemy/event/api.py | 75 | 3844 | # event/api.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .. import util, exc
from .base import _registrars
from .registry import _EventKey
CANCEL = util.symbol('CANCEL')
NO_RETVAL = util.symbol('NO_RETVAL')
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError("No such event '%s' for target '%s'" %
(identifier, target))
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. versionadded:: 0.9.3 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. versionadded:: 0.9.3 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` function
will revert all of these operations.
.. versionadded:: 0.9.0
"""
_event_key(target, identifier, fn).remove()
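def _example_listen_remove_roundtrip(target):
    # Illustrative sketch, not part of the original module: removal takes
    # exactly the same (target, identifier, fn) triple that registered the
    # listener. ``target`` stands in for e.g. a mapped class; the listener
    # body is invented for the example.
    def my_listener(*arg):
        pass
    listen(target, 'before_insert', my_listener, propagate=True)
    assert contains(target, 'before_insert', my_listener)
    remove(target, 'before_insert', my_listener)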
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen.
.. versionadded:: 0.9.0
"""
return _event_key(target, identifier, fn).contains()
| gpl-3.0 | 1,422,416,332,737,700,000 | 28.343511 | 84 | 0.62565 | false |
jendap/tensorflow | tensorflow/python/ops/stateless_random_ops.py | 8 | 11692 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("StatelessMultinomial")
ops.NotDifferentiable("StatelessRandomNormal")
ops.NotDifferentiable("StatelessRandomUniform")
ops.NotDifferentiable("StatelessRandomUniformInt")
ops.NotDifferentiable("StatelessTruncatedNormal")
@tf_export("random.stateless_uniform")
def stateless_random_uniform(shape,
seed,
minval=0,
maxval=None,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a uniform distribution.
This is a stateless version of `tf.random_uniform`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the
range of random values to generate. Defaults to 1 if `dtype` is floating
point.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.int32, dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "stateless_random_uniform",
[shape, seed, minval, maxval]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
return gen_stateless_random_ops.stateless_random_uniform_int(
shape, seed=seed, minval=minval, maxval=maxval, name=name)
else:
rnd = gen_stateless_random_ops.stateless_random_uniform(
shape, seed=seed, dtype=dtype)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
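def _example_stateless_uniform():
  # Illustrative sketch, not part of the original module: with a fixed
  # shape-[2] seed the op is deterministic, so `a` and `b` below hold
  # identical values on every run and on every platform.
  seed = [7, 17]
  a = stateless_random_uniform([2, 3], seed=seed)
  b = stateless_random_uniform([2, 3], seed=seed)
  return a, b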
@tf_export("random.stateless_normal")
def stateless_random_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a normal distribution.
This is a stateless version of `tf.random_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateless_random_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export("random.stateless_truncated_normal")
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.truncated_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_truncated_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_truncated_normal(
shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
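def _example_stateless_truncated_normal():
  # Illustrative sketch, not part of the original module: because
  # out-of-range draws are re-picked, every one of the 1000 samples below
  # falls inside [mean - 2 * stddev, mean + 2 * stddev] = [-2.0, 2.0].
  return stateless_truncated_normal([1000], seed=[3, 5], mean=0.0, stddev=1.0)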
@tf_export(v1=["random.stateless_multinomial"])
@deprecation.deprecated(
date=None, instructions="Use tf.random.stateless_categorical instead.")
def stateless_multinomial(logits,
num_samples,
seed,
output_dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a multinomial distribution.
This is a stateless version of `tf.multinomial`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_multinomial(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
output_dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_multinomial", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples,
output_dtype, seed)
@tf_export("random.stateless_categorical")
def stateless_categorical(logits,
num_samples,
seed,
dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a categorical distribution.
  This is a stateless version of `tf.random.categorical`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_categorical(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_categorical", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples, dtype,
seed)
def stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed):
"""Implementation for stateless multinomial/categorical ops (v1/v2)."""
logits = ops.convert_to_tensor(logits, name="logits")
return gen_stateless_random_ops.stateless_multinomial(
logits, num_samples, seed, output_dtype=dtype)
| apache-2.0 | -259,855,489,347,556,220 | 42.790262 | 80 | 0.675676 | false |
kustodian/ansible | lib/ansible/module_utils/network/ftd/fdm_swagger_client.py | 19 | 26649 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.network.ftd.common import HTTPMethod
from ansible.module_utils.six import integer_types, string_types, iteritems
FILE_MODEL_NAME = '_File'
SUCCESS_RESPONSE_CODE = '200'
DELETE_PREFIX = 'delete'
class OperationField:
URL = 'url'
METHOD = 'method'
PARAMETERS = 'parameters'
MODEL_NAME = 'modelName'
DESCRIPTION = 'description'
RETURN_MULTIPLE_ITEMS = 'returnMultipleItems'
TAGS = "tags"
class SpecProp:
DEFINITIONS = 'definitions'
OPERATIONS = 'operations'
MODELS = 'models'
MODEL_OPERATIONS = 'model_operations'
class PropName:
ENUM = 'enum'
TYPE = 'type'
REQUIRED = 'required'
INVALID_TYPE = 'invalid_type'
REF = '$ref'
ALL_OF = 'allOf'
BASE_PATH = 'basePath'
PATHS = 'paths'
OPERATION_ID = 'operationId'
SCHEMA = 'schema'
ITEMS = 'items'
PROPERTIES = 'properties'
RESPONSES = 'responses'
NAME = 'name'
DESCRIPTION = 'description'
class PropType:
STRING = 'string'
BOOLEAN = 'boolean'
INTEGER = 'integer'
NUMBER = 'number'
OBJECT = 'object'
ARRAY = 'array'
FILE = 'file'
class OperationParams:
PATH = 'path'
QUERY = 'query'
class QueryParams:
FILTER = 'filter'
class PathParams:
OBJ_ID = 'objId'
def _get_model_name_from_url(schema_ref):
path = schema_ref.split('/')
return path[len(path) - 1]
class IllegalArgumentException(ValueError):
"""
Exception raised when the function parameters:
- not all passed
- empty string
- wrong type
"""
pass
class ValidationError(ValueError):
pass
class FdmSwaggerParser:
_definitions = None
_base_path = None
def parse_spec(self, spec, docs=None):
"""
This method simplifies a swagger format, resolves a model name for each operation, and adds documentation for
each operation and model if it is provided.
:param spec: An API specification in the swagger format, see
<https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md>
:type spec: dict
        :param docs: A documentation map containing descriptions for models, operations and operation parameters.
:type docs: dict
:rtype: dict
:return:
Ex.
The models field contains model definition from swagger see
<#https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#definitions>
{
'models':{
'model_name':{...},
...
},
'operations':{
'operation_name':{
'method': 'get', #post, put, delete
'url': '/api/fdm/v2/object/networks', #url already contains a value from `basePath`
'modelName': 'NetworkObject', # it is a link to the model from 'models'
# None - for a delete operation or we don't have information
# '_File' - if an endpoint works with files
'returnMultipleItems': False, # shows if the operation returns a single item or an item list
'parameters': {
'path':{
'param_name':{
'type': 'string'#integer, boolean, number
'required' True #False
}
...
},
'query':{
'param_name':{
'type': 'string'#integer, boolean, number
'required' True #False
}
...
}
}
},
...
},
'model_operations':{
'model_name':{ # a list of operations available for the current model
'operation_name':{
... # the same as in the operations section
},
...
},
...
}
}
"""
self._definitions = spec[SpecProp.DEFINITIONS]
self._base_path = spec[PropName.BASE_PATH]
operations = self._get_operations(spec)
if docs:
operations = self._enrich_operations_with_docs(operations, docs)
self._definitions = self._enrich_definitions_with_docs(self._definitions, docs)
return {
SpecProp.MODELS: self._definitions,
SpecProp.OPERATIONS: operations,
SpecProp.MODEL_OPERATIONS: self._get_model_operations(operations)
}
@property
def base_path(self):
return self._base_path
def _get_model_operations(self, operations):
model_operations = {}
for operations_name, params in iteritems(operations):
model_name = params[OperationField.MODEL_NAME]
model_operations.setdefault(model_name, {})[operations_name] = params
return model_operations
def _get_operations(self, spec):
paths_dict = spec[PropName.PATHS]
operations_dict = {}
for url, operation_params in iteritems(paths_dict):
for method, params in iteritems(operation_params):
operation = {
OperationField.METHOD: method,
OperationField.URL: self._base_path + url,
OperationField.MODEL_NAME: self._get_model_name(method, params),
OperationField.RETURN_MULTIPLE_ITEMS: self._return_multiple_items(params),
OperationField.TAGS: params.get(OperationField.TAGS, [])
}
if OperationField.PARAMETERS in params:
operation[OperationField.PARAMETERS] = self._get_rest_params(params[OperationField.PARAMETERS])
operation_id = params[PropName.OPERATION_ID]
operations_dict[operation_id] = operation
return operations_dict
def _enrich_operations_with_docs(self, operations, docs):
def get_operation_docs(op):
op_url = op[OperationField.URL][len(self._base_path):]
return docs[PropName.PATHS].get(op_url, {}).get(op[OperationField.METHOD], {})
for operation in operations.values():
operation_docs = get_operation_docs(operation)
operation[OperationField.DESCRIPTION] = operation_docs.get(PropName.DESCRIPTION, '')
if OperationField.PARAMETERS in operation:
param_descriptions = dict((
(p[PropName.NAME], p[PropName.DESCRIPTION])
for p in operation_docs.get(OperationField.PARAMETERS, {})
))
for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.PATH].items():
params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.QUERY].items():
params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
return operations
def _enrich_definitions_with_docs(self, definitions, docs):
for model_name, model_def in definitions.items():
model_docs = docs[SpecProp.DEFINITIONS].get(model_name, {})
model_def[PropName.DESCRIPTION] = model_docs.get(PropName.DESCRIPTION, '')
for prop_name, prop_spec in model_def.get(PropName.PROPERTIES, {}).items():
prop_spec[PropName.DESCRIPTION] = model_docs.get(PropName.PROPERTIES, {}).get(prop_name, '')
prop_spec[PropName.REQUIRED] = prop_name in model_def.get(PropName.REQUIRED, [])
return definitions
def _get_model_name(self, method, params):
if method == HTTPMethod.GET:
return self._get_model_name_from_responses(params)
elif method == HTTPMethod.POST or method == HTTPMethod.PUT:
return self._get_model_name_for_post_put_requests(params)
elif method == HTTPMethod.DELETE:
return self._get_model_name_from_delete_operation(params)
else:
return None
@staticmethod
def _return_multiple_items(op_params):
"""
        Determines whether the operation returns a single item or a list of items.
:param op_params: operation specification
:return: True if the operation returns a list of items, otherwise False
"""
try:
schema = op_params[PropName.RESPONSES][SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
return PropName.ITEMS in schema[PropName.PROPERTIES]
except KeyError:
return False
def _get_model_name_from_delete_operation(self, params):
operation_id = params[PropName.OPERATION_ID]
if operation_id.startswith(DELETE_PREFIX):
model_name = operation_id[len(DELETE_PREFIX):]
if model_name in self._definitions:
return model_name
return None
def _get_model_name_for_post_put_requests(self, params):
model_name = None
if OperationField.PARAMETERS in params:
body_param_dict = self._get_body_param_from_parameters(params[OperationField.PARAMETERS])
if body_param_dict:
schema_ref = body_param_dict[PropName.SCHEMA][PropName.REF]
model_name = self._get_model_name_byschema_ref(schema_ref)
if model_name is None:
model_name = self._get_model_name_from_responses(params)
return model_name
@staticmethod
def _get_body_param_from_parameters(params):
return next((param for param in params if param['in'] == 'body'), None)
def _get_model_name_from_responses(self, params):
responses = params[PropName.RESPONSES]
if SUCCESS_RESPONSE_CODE in responses:
response = responses[SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
if PropName.REF in response:
return self._get_model_name_byschema_ref(response[PropName.REF])
elif PropName.PROPERTIES in response:
ref = response[PropName.PROPERTIES][PropName.ITEMS][PropName.ITEMS][PropName.REF]
return self._get_model_name_byschema_ref(ref)
elif (PropName.TYPE in response) and response[PropName.TYPE] == PropType.FILE:
return FILE_MODEL_NAME
else:
return None
def _get_rest_params(self, params):
path = {}
query = {}
operation_param = {
OperationParams.PATH: path,
OperationParams.QUERY: query
}
for param in params:
in_param = param['in']
if in_param == OperationParams.QUERY:
query[param[PropName.NAME]] = self._simplify_param_def(param)
elif in_param == OperationParams.PATH:
path[param[PropName.NAME]] = self._simplify_param_def(param)
return operation_param
@staticmethod
def _simplify_param_def(param):
return {
PropName.TYPE: param[PropName.TYPE],
PropName.REQUIRED: param[PropName.REQUIRED]
}
def _get_model_name_byschema_ref(self, schema_ref):
model_name = _get_model_name_from_url(schema_ref)
model_def = self._definitions[model_name]
if PropName.ALL_OF in model_def:
return self._get_model_name_byschema_ref(model_def[PropName.ALL_OF][0][PropName.REF])
else:
return model_name
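def _example_parse_minimal_spec():
    # Illustrative sketch, not part of the original module: run a minimal,
    # hand-written swagger fragment through the parser. The model name,
    # endpoint and operation id below are invented for the example.
    spec = {
        'basePath': '/api/fdm/v2',
        'definitions': {
            'NetworkObject': {
                'type': 'object',
                'properties': {'name': {'type': 'string'}}
            }
        },
        'paths': {
            '/object/networks': {
                'get': {
                    'operationId': 'getNetworkObjectList',
                    'responses': {
                        '200': {
                            'schema': {
                                'type': 'object',
                                'properties': {
                                    'items': {
                                        'type': 'array',
                                        'items': {'$ref': '#/definitions/NetworkObject'}
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    parsed = FdmSwaggerParser().parse_spec(spec)
    # parsed['operations']['getNetworkObjectList'] now carries
    # 'modelName': 'NetworkObject' and 'returnMultipleItems': True.
    return parsed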
class FdmSwaggerValidator:
def __init__(self, spec):
"""
:param spec: dict
data from FdmSwaggerParser().parse_spec()
"""
self._operations = spec[SpecProp.OPERATIONS]
self._models = spec[SpecProp.MODELS]
def validate_data(self, operation_name, data=None):
"""
Validate data for the post|put requests
:param operation_name: string
            The value must be a non-empty string.
The operation name is used to get a model specification
:param data: dict
The value must be in the format that the model(from operation) expects
:rtype: (bool, string|dict)
:return:
(True, None) - if data valid
Invalid:
(False, {
'required': [ #list of the fields that are required but were not present in the data
'field_name',
                'parent.field_name', # when a nested field is omitted
                'parent.list[2].field_name' # if data is an array and one of the fields is omitted
],
'invalid_type':[ #list of the fields with invalid data
{
'path': 'objId', #field name or path to the field. Ex. objects[3].id, parent.name
'expected_type': 'string',# expected type. Ex. 'object', 'array', 'string', 'integer',
# 'boolean', 'number'
'actually_value': 1 # the value that user passed
}
]
})
:raises IllegalArgumentException
'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The data parameter must be a dict' if data is neither a dict nor None
            '{operation_name} operation is not supported' if the spec does not contain the operation
"""
if data is None:
data = {}
self._check_validate_data_params(data, operation_name)
operation = self._operations[operation_name]
model = self._models[operation[OperationField.MODEL_NAME]]
status = self._init_report()
self._validate_object(status, model, data, '')
if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
return False, self._delete_empty_field_from_report(status)
return True, None
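    # Illustrative sketch, not part of the original module: for a model whose
    # required string field 'name' received an integer, validate_data returns
    # a report shaped like
    #   (False, {'invalid_type': [{'path': 'name',
    #                              'expected_type': 'string',
    #                              'actually_value': 123}]})
    # where the operation and field names are invented for the example.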
def _check_validate_data_params(self, data, operation_name):
if not operation_name or not isinstance(operation_name, string_types):
raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
if not isinstance(data, dict):
raise IllegalArgumentException("The data parameter must be a dict")
if operation_name not in self._operations:
raise IllegalArgumentException("{0} operation does not support".format(operation_name))
def validate_query_params(self, operation_name, params):
"""
Validate params for the get requests. Use this method for validating the query part of the url.
:param operation_name: string
            The value must be a non-empty string.
The operation name is used to get a params specification
:param params: dict
should be in the format that the specification(from operation) expects
Ex.
{
'objId': "string_value",
'p_integer': 1,
'p_boolean': True,
'p_number': 2.3
}
:rtype:(Boolean, msg)
:return:
(True, None) - if params valid
Invalid:
(False, {
'required': [ #list of the fields that are required but are not present in the params
'field_name'
],
'invalid_type':[ #list of the fields with invalid data and expected type of the params
{
'path': 'objId', #field name
'expected_type': 'string',#expected type. Ex. 'string', 'integer', 'boolean', 'number'
'actually_value': 1 # the value that user passed
}
]
})
:raises IllegalArgumentException
'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The params parameter must be a dict' if params is neither a dict nor None
            '{operation_name} operation is not supported' if the spec does not contain the operation
"""
return self._validate_url_params(operation_name, params, resource=OperationParams.QUERY)
def validate_path_params(self, operation_name, params):
"""
Validate params for the get requests. Use this method for validating the path part of the url.
:param operation_name: string
            The value must be a non-empty string.
The operation name is used to get a params specification
:param params: dict
should be in the format that the specification(from operation) expects
Ex.
{
'objId': "string_value",
'p_integer': 1,
'p_boolean': True,
'p_number': 2.3
}
:rtype:(Boolean, msg)
:return:
(True, None) - if params valid
Invalid:
(False, {
'required': [ #list of the fields that are required but are not present in the params
'field_name'
],
'invalid_type':[ #list of the fields with invalid data and expected type of the params
{
'path': 'objId', #field name
'expected_type': 'string',#expected type. Ex. 'string', 'integer', 'boolean', 'number'
'actually_value': 1 # the value that user passed
}
]
})
:raises IllegalArgumentException
'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The params parameter must be a dict' if params is neither a dict nor None
            '{operation_name} operation is not supported' if the spec does not contain the operation
"""
return self._validate_url_params(operation_name, params, resource=OperationParams.PATH)
def _validate_url_params(self, operation, params, resource):
if params is None:
params = {}
self._check_validate_url_params(operation, params)
operation = self._operations[operation]
if OperationField.PARAMETERS in operation and resource in operation[OperationField.PARAMETERS]:
spec = operation[OperationField.PARAMETERS][resource]
status = self._init_report()
self._check_url_params(status, spec, params)
if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
return False, self._delete_empty_field_from_report(status)
return True, None
else:
return True, None
def _check_validate_url_params(self, operation, params):
if not operation or not isinstance(operation, string_types):
raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
if not isinstance(params, dict):
raise IllegalArgumentException("The params parameter must be a dict")
if operation not in self._operations:
raise IllegalArgumentException("{0} operation does not support".format(operation))
def _check_url_params(self, status, spec, params):
for prop_name in spec.keys():
prop = spec[prop_name]
if prop[PropName.REQUIRED] and prop_name not in params:
status[PropName.REQUIRED].append(prop_name)
continue
if prop_name in params:
expected_type = prop[PropName.TYPE]
value = params[prop_name]
                if not self._is_correct_simple_types(expected_type, value, allow_null=False):
self._add_invalid_type_report(status, '', prop_name, expected_type, value)
def _validate_object(self, status, model, data, path):
if self._is_enum(model):
self._check_enum(status, model, data, path)
elif self._is_object(model):
self._check_object(status, model, data, path)
def _is_enum(self, model):
return self._is_string_type(model) and PropName.ENUM in model
def _check_enum(self, status, model, value, path):
if value is not None and value not in model[PropName.ENUM]:
self._add_invalid_type_report(status, path, '', PropName.ENUM, value)
def _add_invalid_type_report(self, status, path, prop_name, expected_type, actually_value):
status[PropName.INVALID_TYPE].append({
'path': self._create_path_to_field(path, prop_name),
'expected_type': expected_type,
'actually_value': actually_value
})
def _check_object(self, status, model, data, path):
if data is None:
return
if not isinstance(data, dict):
self._add_invalid_type_report(status, path, '', PropType.OBJECT, data)
return None
if PropName.REQUIRED in model:
self._check_required_fields(status, model[PropName.REQUIRED], data, path)
model_properties = model[PropName.PROPERTIES]
for prop in model_properties.keys():
if prop in data:
model_prop_val = model_properties[prop]
expected_type = model_prop_val[PropName.TYPE]
actually_value = data[prop]
self._check_types(status, actually_value, expected_type, model_prop_val, path, prop)
def _check_types(self, status, actually_value, expected_type, model, path, prop_name):
if expected_type == PropType.OBJECT:
ref_model = self._get_model_by_ref(model)
self._validate_object(status, ref_model, actually_value,
path=self._create_path_to_field(path, prop_name))
elif expected_type == PropType.ARRAY:
self._check_array(status, model, actually_value,
path=self._create_path_to_field(path, prop_name))
elif not self._is_correct_simple_types(expected_type, actually_value):
self._add_invalid_type_report(status, path, prop_name, expected_type, actually_value)
def _get_model_by_ref(self, model_prop_val):
model = _get_model_name_from_url(model_prop_val[PropName.REF])
return self._models[model]
def _check_required_fields(self, status, required_fields, data, path):
missed_required_fields = [self._create_path_to_field(path, field) for field in
required_fields if field not in data.keys() or data[field] is None]
if len(missed_required_fields) > 0:
status[PropName.REQUIRED] += missed_required_fields
def _check_array(self, status, model, data, path):
if data is None:
return
elif not isinstance(data, list):
self._add_invalid_type_report(status, path, '', PropType.ARRAY, data)
else:
item_model = model[PropName.ITEMS]
for i, item_data in enumerate(data):
self._check_types(status, item_data, item_model[PropName.TYPE], item_model, "{0}[{1}]".format(path, i),
'')
@staticmethod
def _is_correct_simple_types(expected_type, value, allow_null=True):
def is_numeric_string(s):
try:
float(s)
return True
except ValueError:
return False
if value is None and allow_null:
return True
elif expected_type == PropType.STRING:
return isinstance(value, string_types)
elif expected_type == PropType.BOOLEAN:
return isinstance(value, bool)
elif expected_type == PropType.INTEGER:
is_integer = isinstance(value, integer_types) and not isinstance(value, bool)
is_digit_string = isinstance(value, string_types) and value.isdigit()
return is_integer or is_digit_string
elif expected_type == PropType.NUMBER:
is_number = isinstance(value, (integer_types, float)) and not isinstance(value, bool)
            is_float_string = isinstance(value, string_types) and is_numeric_string(value)
            return is_number or is_float_string
return False
@staticmethod
def _is_string_type(model):
return PropName.TYPE in model and model[PropName.TYPE] == PropType.STRING
@staticmethod
def _init_report():
return {
PropName.REQUIRED: [],
PropName.INVALID_TYPE: []
}
@staticmethod
def _delete_empty_field_from_report(status):
if not status[PropName.REQUIRED]:
del status[PropName.REQUIRED]
if not status[PropName.INVALID_TYPE]:
del status[PropName.INVALID_TYPE]
return status
@staticmethod
def _create_path_to_field(path='', field=''):
separator = ''
if path and field:
separator = '.'
return "{0}{1}{2}".format(path, separator, field)
@staticmethod
def _is_object(model):
return PropName.TYPE in model and model[PropName.TYPE] == PropType.OBJECT
| gpl-3.0 | 8,751,988,325,963,484,000 | 40.769592 | 119 | 0.573455 | false |
vc3-project/vc3-info-service | vc3infoservice/core.py | 1 | 11353 | #!/bin/env python
__author__ = "John Hover"
__copyright__ = "2017 John Hover"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.1"
__maintainer__ = "John Hover"
__email__ = "[email protected]"
__status__ = "Production"
import logging
import random
import string
class InfoConnectionFailure(Exception):
'''
Network connection failure exception.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoMissingPairingException(Exception):
'''
Exception thrown when a pairing code is invalid, either because it never existed
or the pairing has already been retrieved.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoEntityExistsException(Exception):
'''
Exception thrown when an attempt to create an entity with a
name that already exists. Old entity must be deleted first.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoEntityMissingException(Exception):
'''
Exception thrown when an attempt to get a non-existent entity is made.
Entity must be created before it can be updated.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoEntityUpdateMissingException(Exception):
'''
Exception thrown when an attempt to *update* a non-existent entity is made.
Entity must be created before it can be updated.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoAttributeFacade(object):
'''
Intercepts __setattr__ one level down for InfoEntities.
'''
def __init__(self, parent, attrname):
log = logging.getLogger()
object.__setattr__(self, '_parent', parent)
object.__setattr__(self, '_attrname', attrname)
log.debug("Facade made for attribute %s parent %s" % (attrname, parent))
def __setattr__(self, name, value):
'''
'''
log = logging.getLogger()
        # Note: the facade itself declares no infoattributes; it tracks the
        # wrapped parent entity's declared info attributes (an assumption
        # based on how _parent is stored in __init__).
        if name in self._parent.__class__.infoattributes:
            try:
                diffmap = self._diffmap
            except AttributeError:
                diffmap = {}
                for at in self._parent.__class__.infoattributes:
                    diffmap[at] = 0
                object.__setattr__(self, '_diffmap', diffmap)
diffmap[name] += 1
log.debug('infoattribute %s incremented to %s' % ( name, diffmap[name] ) )
else:
log.debug('non-infoattribute %s' % name)
object.__setattr__(self, name, value)
def __getattr__(self, attrname):
        return object.__getattribute__(self, attrname)
class InfoEntity(object):
'''
Template for Information entities. Common functions.
Classes that inherit from InfoEntity must set class variables to describe handling.
'''
infokey = 'unset'
infoattributes = []
intattributes = []
validvalues = {}
nameattributes = ['name']
def __setattr__(self, name, value):
'''
        _diffmap maps each info attribute to the number of times it has been
        set; a count greater than 1 means it was changed after initialization.
'''
log = logging.getLogger()
if name in self.__class__.infoattributes:
try:
diffmap = self._diffmap
except AttributeError:
diffmap = {}
for at in self.__class__.infoattributes:
diffmap[at] = 0
object.__setattr__(self,'_diffmap', diffmap)
diffmap[name] += 1
else:
log.debug('non-infoattribute %s' % name)
object.__setattr__(self, name, value)
#def __getattr__(self, name):
# '''
# To be on the safe side, we track attributes that have been retrieved.
# Client may alter an object that is the value of the attribute.
# '''
# log = logging.getLogger()
# if name in self.__class__.infoattributes:
# try:
# diffmap = self._diffmap
# except AttributeError:
# diffmap = {}
# for at in self.__class__.infoattributes:
# diffmap[at] = 0
# object.__setattr__(self,'_diffmap', diffmap)
# diffmap[name] += 1
# log.debug('infoattribute %s' % name)
# else:
# log.debug('non-infoattribute %s' % name)
# object.__getattr__(self, name)
def getDiffInfo(self):
'''
Return a list of info attributes which have been set > 1 time.
'''
retlist = []
try:
diffmap = self._diffmap
        except AttributeError:
            return retlist
for a in diffmap.keys():
if diffmap[a] > 1:
retlist.append(a)
return retlist
def __repr__(self):
s = "%s( " % self.__class__.__name__
for a in self.__class__.infoattributes:
val = getattr(self, a, None)
if isinstance(val, str) or isinstance(val, unicode):
if len(val) > 80:
s+="%s=%s... " % (a, val[:25] )
else:
s+="%s=%s " % (a, val )
else:
s+="%s=%s " % (a, val )
s += ")"
return s
def makeDictObject(self, newonly=False):
'''
        Converts this Python object to an attribute dictionary suitable for
        merging into an existing dict that is intended to be converted back
        to JSON. Uses <obj>.name as the key.
'''
d = {}
d[self.name] = {}
if newonly:
# only copy in values that have been re-set after initialization
self.log.debug("newonly set, getting diff info...")
difflist = self.getDiffInfo()
for attrname in difflist:
d[self.name][attrname] = getattr(self, attrname)
else:
# copy in all infoattribute values
self.log.debug("newonly not set, doing all values...")
for attrname in self.infoattributes:
d[self.name][attrname] = getattr(self, attrname)
self.log.debug("Returning dict: %s" % d)
return d
def setState(self, newstate):
self.log.debug("%s object name=%s %s ->%s" % (self.__class__.__name__, self.name, self.state, newstate) )
self.state = newstate
def store(self, infoclient):
'''
Updates this Info Entity in store behind given infoclient.
'''
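        # Entities flagged with 'storenew' (e.g. fresh clones) are written
        # whole; existing entities send only the attributes whose _diffmap
        # count marks them as changed, to be merged server side.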
keystr = self.__class__.infokey
validvalues = self.__class__.validvalues
for keyattr in validvalues.keys():
validlist = validvalues[keyattr]
attrval = getattr(self, keyattr)
if attrval not in validlist:
self.log.warning("%s entity has invalid value '%s' for attribute '%s' " % (self.__class__.__name__,
attrval, keyattr) )
#resources = infoclient.getdocumentobject(key=keystr)
if hasattr(self, 'storenew'):
entdict = self.makeDictObject(newonly=False)
self.log.debug("Dict obj: %s" % entdict)
infoclient._storeentitydict(keystr, entdict )
else:
entdict = self.makeDictObject(newonly=True)
self.log.debug("Dict obj: %s" % entdict)
infoclient._mergeentitydict(keystr, entdict )
self.log.debug("Stored entity %s in key %s" % (self.name, keystr))
def addAcl(self, aclstring):
pass
def removeAcl(self, aclstring):
pass
def getClone(self, newname = None):
'''
        Make a new identical object with a new name attribute.
'''
self.log.debug("making clone of %s object name=%s " % (self.__class__.__name__, self.name) )
dictobject = self.makeDictObject() # has name as index of attribute dict
dict = dictobject[self.name]
if newname is not None:
dict['name'] = newname
else:
dict['name'] = self.generateName()
self.log.debug('new dict is %s' % dict)
newobj = self.__class__.objectFromDict(dict)
newobj.storenew = True
self.log.debug('new object is %s' % newobj)
return newobj
def generateName(self, length=8):
'''
        Generate a new name attribute appropriate to this object.
For parent InfoEntity, just generate a random string...
'''
self.log.debug("Generating name...")
randomstr = InfoEntity.randomChars(length)
self.log.debug("Got random part %s" % randomstr)
newname = ""
for na in self.__class__.nameattributes:
self.log.debug("Building name with %s " % na)
newname += InfoEntity.normalizeAttribute(getattr(self, na))
newname += "-%s" % randomstr
return newname
@classmethod
def objectFromDict(cls, dict):
'''
Returns an initialized Entity object from dictionary.
Input: Dict:
{
"name" : "<name>",
"att1" : "<val1>"
}
'''
log = logging.getLogger()
log.debug("Making object from dictionary...")
#name = dict.keys()[0]
#d = dict[name]
d = dict
args = {}
for key in cls.infoattributes:
try:
args[key] = d[key]
except KeyError, e:
args[key] = None
log.warning("Document object does not have a '%s' key" % e.args[0])
for key in cls.intattributes:
try:
if args[key] is not None:
args[key] = int(args[key])
except KeyError, e:
log.warning("Document object does not have a '%s' key" % e.args[0])
eo = cls(**args)
log.debug("Successfully made object from dictionary, returning...")
return eo
@classmethod
def randomChars(cls, length=5):
log = logging.getLogger()
log.debug("Generating random chars...")
randomstr = ''.join([random.choice(string.ascii_lowercase) for n in xrange(length)])
return randomstr
@classmethod
def normalizeAttribute(cls, value):
log = logging.getLogger()
log.debug("Normalizing %s " % value)
v = str(value)
v = v.lower()
v = v.replace(" ","")
v= v[0:16]
log.debug("Value normalized to %s" % v)
return v
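# Illustrative sketch, not part of the original module: a minimal InfoEntity
# subclass showing the dict round trip. The class and attribute names are
# invented for the example.
class _ExampleEntity(InfoEntity):
    infokey = 'example'
    infoattributes = ['name', 'state']
    def __init__(self, name=None, state=None):
        self.log = logging.getLogger()
        self.name = name
        self.state = state
def _example_entity_roundtrip():
    e = _ExampleEntity(name='demo', state='new')
    d = e.makeDictObject()    # {'demo': {'name': 'demo', 'state': 'new'}}
    e2 = _ExampleEntity.objectFromDict(d['demo'])
    return e2.name == 'demo' and e2.state == 'new'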
class InfoPersistencePlugin(object):
def __init__(self, parent, config, section ):
self.log = logging.getLogger()
self.lock = MockLock()
self.parent = parent
self.config = config
self.section = section
class MockLock(object):
'''
Provided as a convenience for persistence back ends that don't require atomic operations.
'''
def acquire(self):
pass
def release(self):
pass
| gpl-3.0 | 6,548,519,347,135,075,000 | 31.907246 | 201 | 0.534044 | false |
2013Commons/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Prof.py | 37 | 75689 | #._cv_part guppy.heapy.Prof
from Tkinter import *
import tkFileDialog
import tkMessageBox
class MyVar(StringVar):
_default = 0.0
def set(self, value):
StringVar.set(self, '%.2g'%value)
suffixes = ('','K','M','G','T')
def sizestring(value):
value = float(value)
sign = 1
if value < 0:
sign = -1
value = - value
i = 0
while value > 99999:
value /= 1000
i += 1
s = str(int(round(value)))+suffixes[i]
if s.endswith('000'+suffixes[i]):
s = str(int(round(value/1000)))+suffixes[i+1]
if sign == -1:
s = '-' + s
return s
def percentstring(value):
a = abs(value)
if 10 <= a <= 9999:
return '%d'%round(value)
elif 0.01 <= a <= 10:
return '%.2g'%value
elif a <= 1e-10:
return '0'
else:
return '%.0e'%value
def stringsize(s):
if s.isdigit():
return int(s)
suf = s[-1:].upper()
mult = 1000l
for su in suffixes[1:]:
if su == suf:
break
mult *= 1000
else:
raise ValueError
return int(s[:-1])*mult
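def _example_sizestrings():
    # Illustrative sketch, not part of the original module: sizestring and
    # stringsize are near inverses over the K/M/G/T suffix notation.
    assert sizestring(123456) == '123K'
    assert sizestring(999999) == '1M'        # rounds up and bumps the suffix
    assert stringsize('123K') == 123000
    return sizestring(stringsize('5G'))      # -> '5G'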
class Menu(Menu):
# A fix for the .delete() method in Menu.
    # It also deletes the Tcl commands defined for the menu items being deleted.
# Also changed the comment: INDEX2 is actually INCLUDED.
def delete(self, index1, index2=None):
"""Delete menu items between INDEX1 and INDEX2 (included)."""
if index2 is None:
index2 = index1
# First find out what entries have defined commands.
cmds = []
for i in range(self.index(index1), self.index(index2)+1):
c = str(self.entrycget(i, 'command'))
if c in self._tclCommands:
# I don't want to delete the command already, since it
# seems mystical to do that while the entry is not yet deleted.
cmds.append(c)
# Delete the menu entries.
self.tk.call(self._w, 'delete', index1, index2)
# Now that the menu entries have been deleted,
# we can delete their commands.
for c in cmds:
self.deletecommand(c)
class SizeVar(StringVar):
_default = 0.0
def set(self, value):
self._value = value
s = sizestring(value)
StringVar.set(self, s)
class ValueLabel(Label):
def __init__(self, *args, **kwds):
kwds['width']=10
Label.__init__(self, *args, **kwds)
class ClickButton(Button):
# Button that runs the command directly at the click, not at release.
# And has auto-repeat.
def __init__(self, master, command, firstdelay=500,thendelay=150, **kwds):
Button.__init__(self, master, **kwds)
self._command = command
self._firstdelay = firstdelay
self._thendelay = thendelay
self.bind('<Button-1>', self._event_button)
self.bind('<ButtonRelease-1>', self._event_release)
def _event_button(self, event=None):
self._command()
if event is not None:
delay = self._firstdelay
else:
delay = self._thendelay
self._after = self.after(delay, self._event_button)
def _event_release(self, event):
self.after_cancel(self._after)
del self._after
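# Illustrative usage sketch, not part of the original module:
#   root = Tk()
#   b = ClickButton(root, command=lambda: sys.stdout.write('tick\n'),
#                   text='hold me')   # fires on press, then auto-repeats
#   b.pack()
# (the callback and label are invented for the example; 'import sys' assumed)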
class Stats:
def __init__(self, mod, fn=None):
self.mod = mod
self.os = mod.os
self.md5 = mod.md5
self.fn = fn
def clear_cache(self):
# It is intended to be transparently
# automagically reopened when needed.
self.stats = None
del self.stats
def get_stats(self):
self.open(self.fn)
return self.stats
stats = property(get_stats)
def collect(self):
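        # Incremental reload: re-hash the prefix of the file that was already
        # loaded; if the digest is unchanged the file has only been appended
        # to, so only the tail is parsed, otherwise everything is reloaded.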
if not self.fn:
return 0,0
stat = self.os.stat(self.fn)
if stat == self.laststat:
return len(self), 0
f = open(self.fn)
str = f.read(self.lastfilesize)
md5 = self.md5.md5(str)
digest = md5.digest()
if digest == self.lastdigest:
numoldstats = len(self)
else:
self.loadstr(str, reset=1)
numoldstats = 0
str = f.read()
self.laststat = self.os.fstat(f.fileno())
f.close()
self.lastfilesize = self.laststat.st_size
md5.update(str)
self.lastdigest = md5.digest()
self.loadstr(str)
numnewstats = len(self.stats)-numoldstats
return numoldstats, numnewstats
def open(self, fn):
if not fn:
self.len_stats = 0
self.stats = []
self.max_size = 0
self.fn = fn
return
f = open(fn)
str = f.read()
lastdigest = self.md5.md5(str).digest()
laststat = self.os.fstat(f.fileno())
f.close()
self.loadstr(str, reset=1)
# Update these only if there was no exception so far.
self.fn = fn
self.lastdigest = lastdigest
self.laststat = laststat
self.lastfilesize = laststat.st_size
def loadstr(self, str, reset=0):
stats = []
lines = str.split('\n')
del str
linesiter = iter(lines)
max_size = 0
while 1:
try:
st = self.mod.Use.load(linesiter)
except StopIteration:
break
stats.append(st)
if st.size > max_size:
max_size = st.size
# Only update self if there were no exception so far
if reset:
self.stats = []
self.max_size = 0
self.max_size = max(self.max_size, max_size)
self.stats.extend(stats)
self.len_stats = len(self.stats)
def __getitem__(self, idx):
return self.stats[idx]
def __len__(self):
try:
return self.len_stats
except AttributeError:
self.len_stats = len(self.stats)
return self.len_stats
def get_max_size(self):
return self.max_size
class ProfileRow:
kindwidth = 30
def __init__(self, master, row, usecolor=1):
self.master = master
self.row = row
if usecolor:
colbg = Frame(master=master,bg='black',width=1, borderwidth=1, relief=GROOVE)
self.color = Label(master=colbg,bg='white',width=1, borderwidth=1, relief=GROOVE)
self.color.grid(row=0, column=0)
colbg.grid(row=row,column=0, sticky=NW)
self.rsizevar = SizeVar()
self.rsize = Label(master=master, textvariable=self.rsizevar, width=6,anchor=E)
self.rpercentvar = StringVar() #BBIntVar()
self.rpercent = Label(master=master,textvariable=self.rpercentvar, width=3,anchor=E)
self.dsizevar = SizeVar()
self.dsize = Label(master=master, textvariable=self.dsizevar, width=6,anchor=E)
self.dpercentvar = StringVar() #BBIntVar()
self.dpercent = Label(master=master,textvariable=self.dpercentvar, width=3,anchor=E)
self.kindvar = StringVar()
self.kind = Label(master=master, textvariable=self.kindvar, anchor=NW,
width=self.kindwidth ,justify=LEFT)
self.rsize.grid(row=row, column=1, sticky=NE)
self.rpercent.grid(row=row,column=2,sticky=NE)
self.dsize.grid(row=row,column=3,sticky=NE)
self.dpercent.grid(row=row,column=4,sticky=NE)
self.kind.grid(row=row, column=5, sticky=NW)
def set_color_size_percent_kind(self, color, rsize, rpercent, dsize, dpercent, kind):
        if color is not None:
            self.set_color(color)
self.rsizevar.set(rsize)
if rpercent is None:
rpercent = ''
else:
rpercent = str(int(round(rpercent)))
self.rpercentvar.set(rpercent)
self.dsizevar.set(dsize)
dpercent = str(int(round(dpercent)))
self.dpercentvar.set(dpercent)
self.set_kind(kind)
def set_color(self, color):
self.color.configure(bg=color)
def set_kind(self, kind):
self.kindtext = kind
if len(kind) > self.kindwidth:
import textwrap
kind = textwrap.fill(kind, width=self.kindwidth)
self.kindvar.set(kind)
def clear(self):
self.set_color_size_percent_kind(self.master['bg'], 0, 0, 0, 0, '--')
class AxisControl:
scale_table = [1l, 2l, 5l]
while scale_table[-1] < 1e12:
scale_table.append(scale_table[-3] * 10l)
def __init__(self, master,
name,
range,
grid,
unit,
rangecommand,
gridcommand,
autocommand=None
):
small = 0
self.name = name
self.unit = unit
self.range = range
self.rangecommand = rangecommand
self.frame = frame = Frame(master,borderwidth=2,relief=GROOVE)
self.rangevar = SizeVar()
self.rangevar.set(range)
if 1:
rangeval = Entry(master=self.frame,
# anchor=E,
width=4,
textvar=self.rangevar,
#font=('fixed', '16', 'bold'),
#font=('terminal', '16', 'bold'),
#font=('terminal', '14'),
font=('fixed', '14'),
#bg='black',fg='yellow'
bg='#fdd'
)
rangeval.bind('<KeyPress-Return>',self.event_range_enter)
elif 1:
rangeval = Button(master=self.frame,
anchor=E,
width=4,
textvar=self.rangevar,
#font=('fixed', '16', 'bold'),
font=('terminal', '16', 'bold'),
bg='black',fg='yellow')
else:
rangeval = Listbox(
self.frame,
height=1,
width=4,
font=('terminal', '16', 'bold'),
bg='black',fg='yellow')
for scale in self.scale_table:
s = sizestring(scale)
rangeval.insert(0, s)
namelabel = Menubutton(frame, text=name, relief='raised', anchor=W)
namemenu = Menu(namelabel)
namelabel['menu']=namemenu
if autocommand:
self.autovar = BooleanVar()
self.autovar.set(True)
namemenu.add_checkbutton(
#autobutton = Checkbutton(frame,
label='Auto',
variable=self.autovar,
command = autocommand,
#relief=RAISED
)
autobutton = Checkbutton(frame,
text='Auto',
variable=self.autovar,
command = autocommand,
relief=RAISED
)
else:
self.autovar = None
if gridcommand:
self.gridvar = BooleanVar()
self.gridvar.set(grid)
namemenu.add_checkbutton(
label='Grid',
variable=self.gridvar,
command = lambda: gridcommand(self.gridvar.get()),
)
gridbutton = Checkbutton(frame,
text='Grid',
variable=self.gridvar,
command = lambda: gridcommand(self.gridvar.get()),
relief=RAISED
)
rangelabel = Label(frame, text='Range')
if name == 'Y' and small:
padx = 5
pady = 0
else:
padx = 3
pady = 3
ud = Frame(frame)
rangeup = ClickButton(ud, text='+',
pady=pady,padx=padx,
font=('fixed',8),
command=lambda:self.range_button(1))
rangedown = ClickButton(ud, text='-',
pady=pady,padx=padx,
font=('fixed',8),
command=lambda:self.range_button(-1))
rangedown.grid(row=0,column=0)
rangeup.grid(row=0,column=1)
row=0
if small and name == 'Y':
namelabel.grid(row=0, rowspan=1,column=0)
rangeup.grid(row=0, column=1, sticky=W)
autobutton.grid(row=1,column=0)
rangedown.grid(row=1, column=1, sticky=W)
rangeval.grid(row=2, column=0, columnspan=2,sticky=W,padx=3, pady=3)
elif small and name == 'X':
namelabel.grid(row=0, column=0)
rangeval.grid(row=0, column=1,sticky=W,padx=3, pady=3)
rangedown.grid(row=0, column=2, sticky=W)
rangeup.grid(row=0, column=3, sticky=W)
else:
namelabel.grid(row=row, column=0, sticky=N+W,ipadx=0,ipady=0,padx=2,pady=2)
rangelabel.grid(row=row, column=1, sticky=W)
ud.grid(row=row,column=2, padx=2)
row += 1
if gridcommand:
gridbutton.grid(row=row, column=0, sticky=W)
rangeval.grid(row=row, column=1, padx=3, pady=3)
if autocommand:
pass
autobutton.grid(row=row,column=2)
def cmd_range(self):
pass
def event_range_enter(self, event):
        s = self.rangevar.get() # avoid shadowing the builtin str
        try:
            rng = stringsize(s)
if rng not in self.scale_table:
if not 1 <= rng <= self.scale_table[-1]:
raise ValueError
except:
self.frame.bell()
self.errorbox("""\
Invalid range entry.
It should be a positive integer with an optional multiplier:
K, M, G, or T
(1000, 1e6, 1e9, 1e12)
Maximum range is 1T.""")
self.rangevar.set(self.range)
else:
if self.autovar:
self.autovar.set(False)
self.setrange(rng)
def auto_command(self):
pass
def errorbox(self, msg):
tkMessageBox.showerror(master=self.frame, message=msg)
def fit(self, range):
range = self.scale_by_table(range)
self.setrange(range)
def range_button(self, d):
if self.autovar:
self.autovar.set(False)
self.range_change(d)
def range_change(self, d):
range = self.range
srange = self.scale_by_table(range)
if srange > range:
if d > 0:
d -= 1
i = self.scale_table.index(srange)
i += d
if i >= len(self.scale_table):
i = len(self.scale_table) - 1
if i < 0:
i = 0
self.setrange(self.scale_table[i])
def setrange(self, range):
if range != self.range:
self.range = range
self.rangevar.set(range)
self.rangecommand(range)
def scale_by_table(self, s):
# Return the scale from table that is higher or equal to s
for ts in self.scale_table:
if ts >= s:
return ts
return self.scale_table[-1]
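# WM selects how the x-marker label is drawn: with WM true a real Label
# widget is embedded in the marks canvas (so it can have relief and its own
# event bindings); with WM false a plain canvas text item is used instead.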
WM = 1
class Marker:
def __init__(self, d, tag, name, pos, poscommand=None):
self.d = d
self.tag = tag
self.name = name
self.xmarker = pos
self.butdown = 0
self.ocursor = d.ocursor
self.cursor = self.ocursor
        self.poscommand = poscommand
self.intpos = None
self.moving = 0
self.selected = 0
self.entered = 0
self.butdownselected = 0
self.motion_id = None
self.create()
def bind(self, sequence, function):
tag = self.tag
self.d.drawingarea.tag_bind(tag, sequence, function)
if WM:
self.xlabel.bind(sequence, function)
else:
self.d.xmarks.tag_bind(tag, sequence, function)
def coords(self, canx):
self.d.drawingarea.coords(self.tag,
canx, 0,
canx,-int(self.d.boty))
self.d.xmarks.coords(self.tag, canx, 10)
def create(self):
tag = self.tag
text = self.name
pos = 0
if 1:
self.d.drawingarea.create_line(pos, 0, pos, 20-self.d.boty, stipple='gray12',
width=4,tags=(tag,))
if WM:
label = self.xlabel = Label(self.d.xmarks, text=text, padx=2,pady=2,relief=RAISED)
self.d.xmarks.create_window(pos, 0, window=label, tags=(tag,))
else:
self.d.xmarks.create_text(pos, 0, text=text, tags=(tag,))
self.bind('<Button-1>', self.event_button_1)
self.bind('<ButtonRelease-1>', self.event_button_1_release)
self.bind('<Enter>', self.event_enter)
self.bind('<Leave>', self.event_leave)
self.d.drawingarea.bind('<Enter>', self.event_enter_movearea, add='+')
self.d.drawingarea.bind('<Button-1>', self.event_button_1_movearea, add='+')
def event_button_1(self, event):
self.butdown = 1
if self.selected:
self.butdownselected = 1
if self.moving:
self.event_stop_move(event)
else:
self.butdownselected = 0
self.has_moved = 0
self.event_selected(event)
self.event_start_move(event)
def event_button_1_movearea(self, event):
if not self.entered:
self.event_deselected(event)
def event_button_1_release(self, event):
self.butdown = 0
if self.has_moved == self.butdownselected:
if self.selected:
if self.moving and not (self.disloy <= event.y_root < self.dishiy):
self.event_stop_move(None)
self.setcursor(self.ocursor)
else:
self.setcursor(self.ocursor)
return
self.event_deselected(event)
def event_deselected(self, event):
if self.selected:
self.selected = 0
self.xlabel['relief'] = RAISED
if self.moving:
self.event_stop_move(event)
def event_enter(self, event):
self.entered = 1
if not self.moving:
if self.selected:
self.event_start_move(event)
else:
self.setcursor('hand2')
def event_enter_movearea(self, event):
if self.selected and not self.moving:
self.event_start_move(event)
def event_leave(self, event):
self.entered = 0
if not self.moving:
self.setcursor(self.ocursor)
elif not (self.fraloy <= event.y_root < self.frahiy):
pass
def event_motion(self, event):
self.has_moved = 1
if 0: # Simple variant - get back
if not (self.fraloy <= event.y_root < self.frahiy):
self.event_button_1_release(self.down_event)
return
inside = (self.fraloy <= event.y_root < self.frahiy)
if inside != self.inside:
self.inside = inside
if not inside:
self.out_event = event
self.event_stop_move(None)
if self.butdown:
self.setcursor('circle')
self.d.bind_motion(self.event_motion_downout)
else:
self.in_event = event
#self.delta += self.out_event.x_root - event.x_root
self.event_start_move(event)
return
if inside:
self.moved(event)
self.setxvars()
def event_motion_downout(self, event):
# We don't get an enter while button is pressed down
# Emulate an enter if we detect entering
inside = (self.fraloy <= event.y_root < self.frahiy)
if inside:
self.d.unbind_motion(self.event_motion_downout)
self.event_enter_movearea(event)
def event_selected(self, event):
for m in self.d.marks:
m.event_deselected(event)
self.selected = 1
self.xlabel['relief'] = SUNKEN
def event_start_move(self, event):
self.moving = 1
self.fralox = self.d.frame.winfo_rootx()
self.frahix = self.fralox + self.d.frame.winfo_width()
self.fraloy = self.d.frame.winfo_rooty()
self.frahiy = self.fraloy + self.d.frame.winfo_height()
self.dislox = self.d.drawingarea.winfo_rootx()
self.dishix = self.dislox + self.d.drawingarea.winfo_width()
self.disloy = self.d.drawingarea.winfo_rooty()
self.dishiy = self.disloy + self.d.drawingarea.winfo_height()
self.down_event = event
self.prev_event = event
self.down_xmarker = self.xmarker
self.down_xvfrac = self.d.drawingarea.xview()[0]
self.inside = 1
self.delta = 0
self.lift()
self.motion_id = self.d.bind_motion(self.event_motion)
self.moved(event)
def event_stop_move(self, event):
assert self.moving
self.moving = 0
self.d.unbind_motion(self.motion_id)
if event is not None:
self.moved(event)
self.setxvars()
if self.entered and not self.selected:
self.setcursor('hand2')
else:
self.setcursor(self.ocursor)
def lift(self):
self.d.xmarks.tag_raise(self.tag)
if WM:
self.xlabel.lift()
self.d.drawingarea.tag_raise(self.tag)
def move(self, sample):
canx = self.d.canxscaled(sample)
self.d.xview_pos(canx)
self.coords(canx)
self.xmarker = sample
self.lift()
def moved(self, event):
curx = event.x_root
cury = event.y_root
prevx = self.prev_event.x_root
if prevx > self.dishix and curx < self.dishix:
prevx = self.dishix
elif prevx < self.dislox and curx > self.dislox:
prevx = self.dislox
markx = self.d.canxscaled(self.xmarker) - self.d.drawingarea.canvasx(0) + self.dislox
dx = curx - prevx
l = r = 1
if self.xmarker >= self.d.numstats-1:
r = 0
if self.xmarker <= 0:
l = 0
stop = 0
# Should we allow to move it back or not
# if it is at an endpoint?
# Here we don't move it at all, to make marker pos correspond
# more closely with mouse position.
if ((r == 0 and curx > markx) or (l == 0 and curx < markx)):
l = r = 0
if self.butdown:
if curx > self.dishix:
l = 0
elif curx < self.dislox:
r = 0
else:
if not (self.dislox <= curx < self.dishix and
self.disloy <= cury < self.dishiy):
l = r = 0
stop = 1
if l and r:
self.setcursor('sb_h_double_arrow')
elif l:
self.setcursor('sb_left_arrow')
if dx > 0:
dx = 0
elif r:
self.setcursor('sb_right_arrow')
if dx < 0:
dx = 0
else:
self.setcursor('dot')
dx = 0
self.prev_event = event
sample = self.d.limitx(self.xmarker + dx / self.d.xscale)
canx = self.d.canxscaled(sample)
self.d.xview_pos(canx)
self.coords(canx)
self.xmarker = sample
if stop and self.moving:
self.event_stop_move(None)
def set(self):
canx = self.d.canxscaled(self.xmarker)
self.coords(canx)
self.lift()
def set_poscommand(self, command):
self.poscommand = command
self.intpos = None
def setcursor(self, cursor):
if cursor != self.cursor:
self.xlabel['cursor'] = cursor
self.cursor = cursor
self.d.setcursor(cursor)
def setxvars(self):
if self.poscommand:
intpos = int(round(self.xmarker))
if intpos != self.intpos:
self.intpos = intpos
self.poscommand(intpos)
class Display:
orgwidth = 300
orgheight = 300
minwidth = 30
minheight = 30
def __init__(self, master,
scale_table,
numkindrows,
getkindcolor,
xrange=100,
yrange=100,
xgrid = False,
ygrid = False,
graphtype = 'Bars',
statype = 'Size',
):
self.master = master
self.scale_table = scale_table
self.numkindrows = numkindrows
self.getkindcolor = getkindcolor
self.xrange = xrange
self.yrange = yrange
self.xgrid = xgrid
        # BooleanVar's first positional argument is the master widget, not
        # the initial value, so the grid flags are set via .set() instead.
        self.var_xgrid = BooleanVar()
        self.var_xgrid.set(xgrid)
        self.var_ygrid = BooleanVar()
        self.ygrid = ygrid
        self.var_ygrid.set(ygrid)
self.graphtype = graphtype
self.statype = statype
self.numstats = 0
self.ymaxs = []
self.ymins = []
self.ymax = 1
# To get around problems with dynamic unbinding / unbinding of motion,
# I handle it myself. in the bind_motion method using the following.
self.bound_motions = {}
self.event_motion_id = None
#
self.frame = frame = Frame(master,
borderwidth=3,
relief=SUNKEN,
#relief=GROOVE,
#background='green'
)
#self.frame = frame = Frame(master,background='green')
bordercolor = '#ccc'
screencolor = '#e0e0e0'
xscrollincrement = 1
frame = Frame(self.frame)
frame.grid(row=0,column=0)
#move = Frame(frame, height=10,width=10,background='red', relief=RAISED)
#move = Button(self.frame, height=10,width=10,background='red')
self.drawingarea = C = Canvas(frame,
width=self.orgwidth,
height=self.orgheight,
xscrollincrement=xscrollincrement,
#background='black',
background = screencolor,
bd=0,
xscrollcommand = self.xscrollbar_set,
#confine=False,
)
#self.yctrlframe = Frame(frame, borderwidth=2,relief=GROOVE)
self.yscrollbar = Scrollbar(frame, orient = VERTICAL, width=10)
#self.yscrollbar['command']=self.drawingarea.yview
#self.drawingarea['yscrollcommand'] = self.yscrollbar_set
#self.yscrollbar.pack(side=RIGHT,fill=Y)
#self.yctrlframe.grid(row = 0, column = 0,sticky=N+S,padx=3,pady=3)
self.xaxis = Canvas(frame,
width=C['width'],
height=20,
xscrollincrement=xscrollincrement,
bd=0,
background = bordercolor,
#xscrollcommand = self.xscrollbar_set
#confine=False,
)
self.xmarks = Canvas(frame,
width=C['width'],
height=20,
xscrollincrement=xscrollincrement,
bd=0,
background = bordercolor,
#xscrollcommand = self.xscrollbar_set
#confine=False,
)
self.yaxis = Canvas(frame, height=C['height'],width=50,
bd=0,
background = bordercolor,
)
self.xscrollbar = Scrollbar(frame, orient=HORIZONTAL,
command=self.drawingarea_xview,
width=12,
background = bordercolor,
)
xy = Canvas(frame, width=50,height=20,bd=0,
background = bordercolor,
)
#
if 0:
self.yaxis.grid(row = 0, column = 0)
self.yscrollbar.grid(row=0,column=2, sticky=N+S)
C.grid(row = 0, column = 1, sticky=W+E )
xy.grid(row=1,column=0)
self.xaxis.grid(row = 1, column = 1)
self.xscrollbar.grid(row=2,column=1,sticky=E+W)
self.rsbut.grid(row=2,column=2)
else:
var_yrange = SizeVar()
self.var_yrange = var_yrange
row = 0
Label(frame,
textvar=var_yrange,
bd=0,
relief=FLAT,
background=bordercolor).grid(
row=row,
column=0,
sticky=W+E+N+S)
self.xscrollbar.grid(row=row,column=1,sticky=E+W)
row += 1
self.yunit = Label(frame,
text='Bytes',
bd=0,
relief=FLAT,
background=bordercolor)
self.yunit.grid(
row=row,
column=0,
sticky=W+E+N+S)
self.xmarks.grid(row=row, column=1,sticky=W+E+N)
row += 1
self.yaxis.grid(row = row, column = 0)
C.grid(row = row, column = 1, sticky=W+E )
row += 1
xy.grid(row=row,column=0)
self.xaxis.grid(row = row, column = 1,sticky=W+E+N)
#
self.botx = float(C['width'])
self.boty = float(C['height'])
self.chdim = self.getchdim()
self.canx0 = 0
self.tmax = 0
self.xscale = self.botx / self.xrange
self.yscale = self.boty / self.yrange
self.xi0 = None
xy.create_line(0,2,44,2)
xy.create_line(49, 6,49,22)
xy.create_text(25, 14, text='Sample')
self.setscrollregion()
self.ocursor = self.drawingarea['cursor']
self.cursor = self.ocursor
self.marks = []
def bind_motion(self, function):
if self.event_motion_id == None:
self.event_motion_id = self.frame.bind_all('<Motion>', self.event_motion, add='+')
self.bound_motions[function] = self.bound_motions.get(function, 0) + 1
return function
def event_motion(self, event):
for f in self.bound_motions.keys():
f(event)
def unbind_motion(self, funcid):
n = self.bound_motions[funcid] - 1
if n == 0:
del self.bound_motions[funcid]
else:
self.bound_motions[funcid] = n
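    # Usage sketch for the reference-counted motion dispatch above
    # (on_motion is a hypothetical handler):
    #
    #     mid = display.bind_motion(on_motion)   # start receiving <Motion>
    #     ...
    #     display.unbind_motion(mid)             # removed only once every
    #                                            # bind has a matching unbind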
def new_xmarker(self, name = None, pos=0):
tag = 'M%d'%len(self.marks)
if name is None:
name = tag
m = Marker(self, tag, name, pos)
self.marks.append(m)
return m
def canxscaled(self, x):
return x * self.xscale + self.canx0
def canyscaled(self, y):
return - y * self.yscale
def cmd_xgrid(self):
self.xgrid = self.var_xgrid.get()
self.drawxaxis()
def cmd_ygrid(self):
self.ygrid = self.var_ygrid.get()
self.drawyaxis()
def cmd_yrange_auto(self):
self.ymax = None
self.yrange_auto()
def limitx(self, x):
lo = 0
hi = max(0, self.numstats-1)
if x < lo:
return lo
if x > hi:
return hi
return x
def resize(self, dx, dy):
x = self.botx + dx
y = self.boty + dy
if x < self.minwidth:
x = self.minwidth
dx = x - self.botx
if y < self.minheight:
y = self.minheight
dy = y - self.boty
xv = self.drawingarea.xview()
yv = self.drawingarea.yview()
self.drawingarea.configure(width=x, height=y)
self.xaxis.configure(width=x)
self.xmarks.configure(width=x)
self.yaxis.configure(height=y)
xscale = float(x) / self.xrange
yscale = float(y) / self.yrange
xscaleorg = self.drawingarea.canvasx(0)
yscaleorg = 0
xq = xscale / self.xscale
yq = yscale / self.yscale
self.drawingarea.scale("all",xscaleorg, yscaleorg, xq, yq)
#self.drawingarea.scale("barsep",xscaleorg, yscaleorg, xq, yq)
#self.drawingarea.scale("xmarker",xscaleorg, yscaleorg, xq, yq)
self.canx0 = xscaleorg + (self.canx0 - xscaleorg) * xq
self.botx = x
self.boty = y
self.xscale = xscale
self.yscale = yscale
self.drawxaxis()
self.drawyaxis()
self.setscrollregion()
# If the size changed much, the canvas may scroll though it shouldn't.
# Notes 11 and 26 Oct 2005 .
# I save the current scroll position.
# The caller has to call the .moveback() method some time later.
self.wantedpos = xv[0]
return dx, dy
def moveback(self):
self.frame.update_idletasks()
self.xview(MOVETO, self.wantedpos)
    def draw(self):
self.drawxaxis()
self.drawyaxis()
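    # draw_stat renders one statistics sample at x index idx, either as a
    # stacked bar per kind ('Bars') or as per-kind line segments ('Lines'),
    # and records the sample's y extremes for later auto-ranging.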
def draw_stat(self, idx, stat):
graphtype = self.graphtype
statype = self.statype
rows = stat.get_rows_n_and_other(self.numkindrows, statype)
if statype == 'Size':
kindval = dict([(r.name, r.size) for r in rows])
else:
kindval = dict([(r.name, r.count) for r in rows])
order = [r.name for r in rows]
order.reverse()
lastkindval = self.lastkindval
self.lastkindval = kindval
C = self.drawingarea
yscale = self.yscale
xscale = self.xscale
x0 = idx * xscale - 0.5 * xscale + self.canx0
x1 = x0 + xscale
ymax = 0
ymin = 0
y = 0
bw = 0.05*xscale
ocolor = None
for k in order:
dy = kindval.get(k, 0)
if not dy:
continue
color = self.getkindcolor(k)
if graphtype == 'Bars':
line = C.create_rectangle(x0+bw, -y*yscale,
x1-bw, -(y+dy)*yscale,
fill=color,
outline=color,
width = 0,
tags=("a",))
if color == ocolor:
C.create_line(x0, -(y)*yscale,
x1, -(y)*yscale,
fill='black',
tags=('barsep',))
ocolor = color
y += dy
elif graphtype == 'Lines':
if dy > ymax:
ymax = dy
elif dy < ymin:
ymin = dy
y0 = lastkindval.get(k)
if y0 is None:
y0 = dy
x00 = x0
else:
x00 = x0 - 0.4 * xscale
C.create_line(x00, - y0 * yscale,
x1 - 0.6 * xscale, - dy * yscale,
fill=color,
tags=('a',))
if 1:
C.create_line(x1 - 0.6 * xscale, - dy * yscale,
x1 - 0.4 * xscale, - dy * yscale,
fill=color,
width = 4,
tags=('a',))
else:
C.create_rectangle(x1 - 0.6 * xscale, - dy * yscale,
x1 - 0.4 * xscale, - dy * yscale,
fill=color,
outline=color,
width = 2,
tags=('a',))
if graphtype == 'Bars':
if y > ymax:
ymax = y
elif y < ymin:
ymin = y
assert idx == len(self.ymaxs) == len(self.ymins)
self.ymaxs.append(ymax)
self.ymins.append(ymin)
if idx > self.tmax:
self.tmax = idx
def drawingarea_xview(self, cmd, what, unit=None):
if cmd == 'scroll' and unit == 'units':
what = int(max(2, self.xscale)*int(what))
self.xview(cmd, what, unit)
def setcursor(self, cursor):
if cursor != self.cursor:
self.drawingarea['cursor'] = cursor
self.master['cursor'] = cursor
self.cursor = cursor
def xmarkers_set(self):
for m in self.marks:
m.set()
def xview(self, *args):
if not args:
return self.drawingarea.xview()
self.drawingarea.xview(*args)
self.xaxis.xview(*args)
self.xmarks.xview(*args)
def xview_moveto(self, fraction):
self.xview(MOVETO, fraction)
def xview_pos(self, pos, fraction=None, leftmargin = 5, rightmargin = 5):
# Scroll canvas view, if necessary, so that something
# (eg an x marker) at canvas position pos will be visible
# with minimum specified margin at left and right.
# Scroll relative to fraction; default is current xview position.
if fraction is None:
fraction = self.xview()[0]
x1, y1, x2, y2 = self.scrollregion
cc = x1 + fraction * (x2 - x1)
xm = pos - cc
lo = leftmargin
hi = self.botx - rightmargin
if xm < lo:
dx = xm - lo
xm = lo
elif xm >= hi:
dx = (xm - hi)
xm = hi
else:
dx = 0
r = fraction + dx / float(x2 - x1)
self.xview_moveto(r)
def drawxaxis(self):
scale_table = self.scale_table
self.xaxis.delete('all')
self.drawingarea.delete('xgrid')
x1, y1, x2, y2 = self.scrollregion
chdx, chdy = self.chdim
i = 0
while (scale_table[i] * self.xscale <
min(5, len(str(scale_table[i] * self.tmax))) * chdx):
i+=1
self.xstep = scale_table[i]
        divisuf = (
            (1000000000000L, '%dT'),
            (1000000000L, '%dG'),
            (1000000, '%dM'),
            (1000, '%dK'),
            (1, '%d')
            )
for divi, form in divisuf:
if self.xstep >=divi:
break
self.xdivi = divi
self.xform = form
self.xi0 = 0
self.updatexaxis()
def updatexaxis(self):
chdx, chdy = self.chdim
step = self.xstep
gridon = self.xgrid
for i in range(self.xi0, self.tmax+step, step):
x = self.canx0 + i*self.xscale
self.xaxis.create_line(x, 0, x, 4)
if gridon:
self.drawingarea.create_line(x, 0, x, -self.boty,
tags=('xgrid',),width=2,stipple="gray25")
text = self.xform%(i / self.xdivi)
self.xaxis.create_text(x, chdy, text=text)
self.xaxis.create_line(self.canx0 + self.xi0*self.xscale, 1, x+self.xscale, 1)
self.xi0 = i
self.xmarkers_set()
def drawyaxis(self):
gridon = self.ygrid
self.yaxis.delete('all')
self.drawingarea.delete('ygrid')
chdx, chdy = self.getchdim()
width = int(self.yaxis['width'])
i = 0
maxval = self.yrange
while (self.scale_table[i] * self.yscale < 1.5 * chdy):
i+=1
step = self.scale_table[i]
        divisuf = (
            (1000000000000L, '%4dT'),
            (1000000000L, '%4dG'),
            (1000000, '%4dM'),
            (1000, '%4dK'),
            (1, '%5d')
            )
for divi, form in divisuf:
if step >=divi:
break
for i in range(0, maxval+step, step):
y = - i*self.yscale
self.yaxis.create_line(width-3, y, width-1, y)
if gridon:
self.drawingarea.create_line(self.scrollregion[0], y,
self.scrollregion[2], y,
stipple="gray25",
tags=('ygrid',))
if 0 and i == 0:
text = '0 bytes'
else:
text = form % (i / divi)
self.yaxis.create_text(chdx*2.5, y-0.5*chdy, text=text)
#self.yaxis.create_text(chdx*2.5, 0.5*chdy, text='bytes')
self.yaxis.create_line(width-1, 0, width-1, -self.boty)
self.xmarkers_set()
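    # getchdim measures the pixel size of one character cell by creating a
    # throwaway text item and taking its bounding box; axis layout is then
    # computed in these character units.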
def getchdim(self):
ch = self.xaxis.create_text(0, 0, text='0')
x1, y1, x2, y2 = self.xaxis.bbox(ch)
self.xaxis.delete(ch)
chdx = abs(x2 - x1)
chdy = abs(y2 - y1)
return chdx, chdy
def load_stats(self, stats):
ocursor = self.frame.winfo_toplevel()['cursor']
try:
self.frame.winfo_toplevel()['cursor'] = 'watch'
self.frame.update()
self.numstats = len(stats)
self.lastkindval = {}
self.tmax = 0
self.ymax = None
self.ymaxs = []
self.ymins = []
C = self.drawingarea
C.delete('barsep')
C.delete('a')
for (i, st) in enumerate(stats):
self.draw_stat(i, st)
try:
self.drawingarea.tag_raise('barsep', 'a')
except TclError:
pass # May be 'tagOrId "a" doesn't match any items' if empty!
self.drawxaxis()
self.drawyaxis()
self.xmarkers_set()
self.yrange_auto()
finally:
self.frame.winfo_toplevel()['cursor'] = ocursor
def add_stats(self, stats):
for (i, st) in enumerate(stats):
self.draw_stat(i+self.numstats, st)
self.numstats += len(stats)
self.updatexaxis()
self.setscrollregion()
def setxgrid(self, grid):
self.xgrid = grid
self.drawxaxis()
def setygrid(self, grid):
self.ygrid = grid
self.drawyaxis()
def setgraphtype(self, gmode, stats):
graphtype, statype = gmode.split(' ')
if graphtype != self.graphtype or statype != self.statype:
self.graphtype = graphtype
self.statype = statype
if statype == 'Size':
self.yunit['text'] = 'Bytes'
elif statype == 'Count':
self.yunit['text'] = 'Objects'
else:
raise ValueError
self.load_stats(stats)
def setscrollregion(self):
C = self.drawingarea
botx = self.botx
x1 = self.canx0
x2 = self.tmax * self.xscale + self.canx0
if 0:
x1extra = botx
x2extra = botx
if 1:
x1extra = botx / 2 + 2 #max(5, self.xscale*0.5)
x2extra = botx / 2 + 2 #max(5, self.xscale*0.5)
if 0:
x1extra = x2extra = max(5, self.xscale * 0.5)
x1 -= x1extra
x2 += x2extra
y1 = 1-self.boty
y2 = 1
if 0:
try:
_x1, _y1, _x2, _y2 = self.scrollregion
except:
pass
else:
if (abs(_x2 - x2) < x2extra / 2 and
abs(_x1 - x1) < x1extra / 2
):
return
self.scrollregion = (x1, y1, x2, y2)
C.configure(scrollregion = self.scrollregion)
self.xaxis.configure(scrollregion = (x1, 0, x2, 10))
self.xmarks.configure(scrollregion = (x1, 0, x2, 20))
self.yaxis.configure(scrollregion = (0, y1, 20, y2))
self.drawingarea.yview(MOVETO, 0.0)
def setxrange(self, xrange):
dxrange = self.xrange / float(xrange)
self.xrange = xrange
xscaleorg = self.drawingarea.canvasx(self.botx/2)
self.drawingarea.scale("a",xscaleorg, 0, dxrange, 1.0)
self.drawingarea.scale("barsep",xscaleorg, 0, dxrange, 1.0)
self.canx0 = xscaleorg + (self.canx0 - xscaleorg) * dxrange
self.xscale = self.botx / float(self.xrange)
self.setxscrollincrement(max(2, self.xscale))
self.drawxaxis()
self.setscrollregion()
def setxscrollincrement(self, dx):
        return # Disabled: the canvases keep xscrollincrement=1; kept for reference.
self.drawingarea.configure(xscrollincrement=dx)
self.xaxis.configure(xscrollincrement=dx)
self.xmarks.configure(xscrollincrement=dx)
def setyrange(self, yrange):
dyrange = float(self.yrange) / yrange
self.yrange = yrange
self.var_yrange.set(yrange)
self.drawingarea.scale("a",0, 0, 1.0, dyrange)
self.drawingarea.scale("barsep",0, 0, 1.0, dyrange)
self.yscale = float(self.boty) / self.yrange
self.drawingarea.yview(MOVETO, 0.0)
self.drawyaxis()
def xscrollbar_set(self, first, last):
self.xscrollbar.set(first, last)
self.yrange_auto()
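    # yrange_auto refits the y range to the largest sample value currently
    # visible in the scrolled x window (unless auto mode is switched off).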
def yrange_auto(self, force=0):
if force or self.ycontrol.autovar.get():
lo = max(0,
int(0.5+(self.drawingarea.canvasx(0) - self.canx0) / self.xscale))
hi = min(len(self.ymaxs),
int(1.5+(self.drawingarea.canvasx(self.botx) - self.canx0) / self.xscale))
if lo == hi:
ymax = 1
else:
ymax = max(self.ymaxs[lo:hi])
if ymax != self.ymax:
self.ymax = ymax
self.ycontrol.fit(ymax)
class MarkerControl:
def __init__(self, master,
marker,
setcommand = lambda:0
):
self.sample = 0
self.numsamples = 0
self.setcommand = setcommand
self.marker = marker
self.name = marker.name
sf = self.frame = Frame(master, borderwidth=2,relief=GROOVE)
self.samplevar = SizeVar()
Label(sf, text='%s sample'%marker.name).grid(row = 0, column = 0)
Label(sf,
textvariable=self.samplevar,
font=('terminal', '16', 'bold'),
bg='black',fg='yellow'
).grid(row = 1, column = 0, padx=3,pady=3)
ClickButton(sf, text='-',
pady=0,padx=5,
command=lambda:self.changesample(-1)).grid(row=0,column=1, sticky=E)
ClickButton(sf, text='+',
pady=0,padx=5,
command=lambda:self.changesample(1)).grid(row=0,column=2, sticky=W)
self.trackingvar = BooleanVar()
self.trackbutton = Checkbutton(
sf, text='Track',
padx=5,
variable = self.trackingvar,
relief=RAISED,
command=self.settracking,
indicatoron=1,
)
self.trackbutton.grid(row=1,column=1,columnspan=2)
def changesample(self, d):
sample = self.sample + d
if 0 <= sample < self.numsamples:
self.setmarker(sample)
def setmarker(self, sample):
self.marker.move(sample)
self.setsample(sample)
def setnumsamples(self, num):
self.numsamples = num
if self.trackingvar.get() or self.sample >= self.numsamples:
self.setmarker(max(0, self.numsamples-1))
def setsample(self, sample):
self.sample = sample
self.samplevar.set(sample)
self.setcommand()
def settracking(self, tracking=None):
if tracking is not None:
self.trackingvar.set(tracking)
else:
tracking = self.trackingvar.get()
if tracking:
self.setmarker(max(0, self.numsamples-1))
class Window:
def __init__(self, app, frame, windowmenu=None):
self.app = app
self.frame = frame
self.windowmenu = windowmenu
self.wtitle = frame.title()
self._is_destroyed = 0
# Binding to <destroy> didnt work well:
# frame.bind('<Destroy>', self.event_destroy, add='+')
# I give up. I modify .destroy of frame argument instead.
self.old_destroy = frame.destroy
frame.destroy = self.new_destroy
def new_destroy(self):
if self._is_destroyed:
return
self._is_destroyed = 1
self.app.del_window(self)
try:
self.old_destroy()
except TclError:
# This may happen at closing last window
# because exit destroys the root when it sees all windows were closed.
# So I ignore it.
pass
def title(self, title):
self.frame.title(title)
self.frame.iconname(title)
self.wtitle = title
self.app.chg_window(self)
def wakeup(self):
frame = self.frame
try:
if frame.wm_state() == "iconic":
frame.wm_deiconify()
frame.tkraise()
# I don't think I want .focus_set: it behaved strange in X at least.
#frame.focus_set()
except TclError:
# This can happen when the window menu was torn off.
# Simply ignore it.
pass
class WindowMenu:
def __init__(self, frame, variable):
self.button = Menubutton(frame, text='Window')
self.menu = Menu(self.button)
self.button['menu'] = self.menu
self.variable = variable
self.wmap = {}
def add_window(self, window):
self.menu.add_radiobutton(
command = window.wakeup,
label='%d %s'%(window.wid, window.wtitle),
value=window.wid,
variable=self.variable)
self.wmap[window.wid] = self.menu.index(END)
def chg_window(self, window):
self.menu.delete(self.wmap[window.wid])
self.menu.insert_radiobutton(
self.wmap[window.wid],
command = window.wakeup,
label='%d %s'%(window.wid, window.wtitle),
value=window.wid,
variable=self.variable)
def del_window(self, window):
idx = self.wmap[window.wid]
del self.wmap[window.wid]
try:
self.menu.delete(idx)
except TclError:
# This can happen if the menu was destroyed before its contents.
# Simply ignore it.
pass
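        # Entries above the deleted one shift down by one, so the cached
        # menu indices must be adjusted to match.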
for wid in self.wmap.keys():
if self.wmap[wid] > idx:
self.wmap[wid] -= 1
class ProfileApp:
def __init__(self, mod):
self.mod = mod
root = Tk()
self.root = root
root.withdraw()
self.windows = {}
self.windowmenus = {}
self.var_window = IntVar(root)
def add_window(self, window):
window.wid = max([0]+self.windows.keys())+1
self.windows[window.wid] = window
wm = getattr(window, 'windowmenu', None)
if wm:
self.windowmenus[window.wid] = wm
for w in self.windows.values():
if w is not window:
wm.add_window(w)
for wm in self.windowmenus.values():
wm.add_window(window)
self.var_window.set(window.wid)
window.frame.bind('<FocusIn>',
lambda event:self.var_window.set(window.wid), add='+')
window.frame.bind('<Deactivate>',
lambda event:self.var_window.set(0), add='+')
def add_window_frame(self, frame, windowmenu=None):
w = Window(self, frame, windowmenu)
self.add_window(w)
return w
def chg_window(self, window):
for wm in self.windowmenus.values():
wm.chg_window(window)
def del_window(self, window):
wid = window.wid
if getattr(window, 'windowmenu', None):
del self.windowmenus[wid]
del self.windows[wid]
for wm in self.windowmenus.values():
wm.del_window(window)
if not self.windows:
self.exit()
def exit(self):
try:
self.root.destroy()
except TclError:
pass
self.root.quit()
def mainloop(self):
return self.root.mainloop()
def new_profile_browser(self, filename):
return ProfileBrowser(self, filename)
class PaneDiv:
def __init__(self, master, movecommand):
self.frame = frame = Frame(master)
self.movecommand = movecommand
self.butsize = bs = 6
bc = self.butcent = bs / 2 + 3
h = 10
self.top = Canvas(
frame,
width=10,
height=h,
)
self.top.create_line(
bc,0,bc,h,fill='#808080', width=1)
self.top.create_line(
bc+1,0,bc+1,h,fill='white', width=1)
self.rsbut = Canvas(
frame,
cursor='crosshair',
width=self.butsize,
height=self.butsize,
relief=RAISED,
bd=2
)
self.bot = Canvas(
frame,
width=10,
height=300,
bd=0
)
self.top.grid(row=0,column=0, sticky=N)
self.rsbut.grid(row=1,column=0, sticky=N)
self.bot.grid(row=2,column=0, sticky=N)
self.rsbut.bind('<Button-1>',self.but_down)
self.rsbut.bind('<ButtonRelease-1>', self.but_up)
def but_down(self, event):
self.down_event = event
self.rsbut.configure(relief=SUNKEN)
def but_up(self, event):
self.rsbut.configure(relief=RAISED)
dx = event.x - self.down_event.x
self.movecommand(dx)
def setheight(self, height):
h = height - 18
self.bot['height'] = h
bc = self.butcent
self.bot.create_line(
bc,0,bc,h,fill='#808080', width=1)
self.bot.create_line(
bc+1,0,bc+1,h,fill='white', width=1)
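# NOTE: two classes named TableFrame are defined in this module; this first
# variant is shadowed by the second definition further below, which is the
# one ProfileBrowser actually instantiates.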
class TableFrame:
def __init__(self, graph, master, numkindrows, samplevar):
self.graph = graph
self.mod = graph.mod
frame = self.frame = Frame(master,borderwidth=2,relief=GROOVE)
row = 0
self.marktime = StringVar()
self.totsizevar = SizeVar()
self.sampler = StringVar()
self.sampler.set('R')
if 1:
fr = Frame(frame) # For header
om = OptionMenu(fr, self.sampler, 'R', 'L', 'R-L')
om.grid(row=0,column=0,sticky=W)
Label(fr, text='Sample').grid(row=0,column=1,sticky=W)
Label(fr, textvariable=samplevar,background='black',foreground='yellow',
).grid(row=0,column=2,sticky=W, pady=3)
Label(fr, text='at').grid(row=0,column=3,sticky=W)
Label(fr, textvariable=self.marktime).grid(row = 0, column = 4, sticky=W)
Label(fr, text='Total size = ').grid(row=1,column=0,columnspan=3,sticky=W)
Label(fr, textvar=self.totsizevar).grid(row=1,column=3,columnspan=2,sticky=W)
fr.grid(row=row, column=0, sticky=W)
row += 1
orow = row
tb = Frame(frame)
row = 0
Label(tb, text="").grid(row=row, column=0)
Label(tb, text="R", ).grid(row=row, column=1, sticky=E)
Label(tb, text="%R").grid(row=row, column=2, sticky=E)
Label(tb, text="R-L", ).grid(row=row, column=3, sticky=E)
Label(tb, text="%L").grid(row=row, column=4, sticky=E)
Label(tb, text="Kind").grid(row=row, column=5, sticky=W)
row += 1
self.profrows = []
self.totrow = ProfileRow(tb, row)
self.profrows.append(self.totrow)
row += 1
for i in range(numkindrows+1):
profrow = ProfileRow(tb, row)
self.profrows.append(profrow)
row += 1
row = orow
tb.grid(row=row, column=0, sticky=W)
# for next..
row += 1
self.totresize = 0
self.kindwidth = ProfileRow.kindwidth
def resize(self, dx, dy):
dx = int(dx)
self.totresize += dx
charresize, extra = divmod(self.totresize, 7)
newwidth = ProfileRow.kindwidth + charresize
oldwidth = self.profrows[0].kind['width']
if newwidth < 10:
newwidth = 10
dx = (newwidth - oldwidth) * 7 + extra
for pr in self.profrows:
pr.kind['width'] = newwidth
pr.kindwidth = newwidth
pr.kind['padx'] = extra / 2
            # set_kind re-wraps the text to the updated kindwidth itself,
            # so no separate textwrap call is needed here.
            pr.set_kind(pr.kindtext)
return dx, dy
def update(self, lsamp, rsamp):
self.marktime.set(self.mod.time.asctime(self.mod.time.localtime(rsamp.stat.timemade)))
        return # The row-updating code below is disabled in this variant.
for pr in self.profrows:
pr.clear()
rdiv = float(rsamp.stat.size)
ldiv = float(lsamp.stat.size)
self.totrow.set_color_size_percent_kind(
None,
rsamp.stat.size,
100.0,
rsamp.stat.size - lsamp.stat.size,
(rsamp.stat.size - lsamp.stat.size) * 100.0 / ldiv,
'<Total>'
)
for i, r in enumerate(rsamp.rows):
l = lsamp.kindrows[r.name]
self.profrows[i+1].set_color_size_percent_kind(
self.graph.getkindcolor(r.name),
r.size,
r.size * 100.0 / rdiv,
r.size - l.size,
(r.size - l.size) * 100.0 / ldiv,
r.name)
class ColSpec:
def __init__(self, tf, header, width, pos, render, idx=()):
self.tf = tf
self.header = header
self.name = header
self.width = width
self.pos = pos
self.render = render
self.idx = idx
def align(self, text):
sp = ' '*(self.width - len(text))
if self.pos == LEFT:
text = text + sp
elif self.pos == RIGHT:
text = sp[:-1] + text + ' '
else:
assert 0
assert len(text) == self.width
return text
class TableFrame:
def __init__(self, graph, master):
self.graph = graph
self.mod = graph.mod
frame = self.frame = Frame(
master,
borderwidth=3,
relief=SUNKEN
)
self.colspecs = {}
self.colwidths = []
def defcol(names, width, pos, put, idxfunc = lambda x:()):
if callable(put):
put = [put]*len(names)
self.colwidths.append(width)
for name, put in zip(names, put):
spec = ColSpec(self, name, width, pos, put, idxfunc(name))
self.colspecs[name] = spec
defcol(('A', 'B'), 2, LEFT, self.putcolor, lambda x:x)
defcol(('Size', 'Count'), 7, RIGHT, [self.putsize, self.putcount])
defcol(('%A:Tot', '%B:Tot'), 7, RIGHT, self.putpercent, lambda name:name[1])
        # idxfunc: 'B-A'/'A-B' split into the two sample names; 'Cumul' gets ().
        defcol(('B-A', 'A-B', 'Cumul'), 7, RIGHT, [self.putdiff, self.putdiff, self.putcumul],
               lambda name:[(),name.split('-')]['-' in name])
defcol(('%A:Tot', '%B:Tot'), 7, RIGHT, self.putpercent, lambda name:name[1])
defcol(('Kind',), 20, LEFT, self.putkind)
width = 0
for w in self.colwidths:
width += w
self.totxresize = 0
self.totyresize = 0
self.kindcol = self.colspecs['Kind']
self.orgkindwidth = self.kindcol.width
self.widthbeforekind = width - self.orgkindwidth
self.minkindwidth = 10
self.mintextheight = 2
width += 1
self.width = self.orgwidth = width
wrap = NONE
cursor = master['cursor']
relief = FLAT
self.minpadx = 3
self.tothead = Text(
frame,
width=width,
wrap=wrap,
background='#ccc',
height=2,
padx=self.minpadx,
relief=relief,
cursor=cursor,
)
self.rowhead = Text(
frame,
width=width,
wrap=wrap,
background='#ccc',
height=1,
padx=self.minpadx,
relief=relief,
cursor=cursor,
)
self.tsframe = Frame(frame)
self.textminpady = 2
self.text = Text(
self.tsframe,
width=width,
wrap=wrap,
height=21,
background='#e0e0e0',
relief=relief,
takefocus=0,
cursor=cursor,
padx=self.minpadx,
pady=self.textminpady,
)
self.scrollbar = Scrollbar(
self.tsframe,
width=10,
orient=VERTICAL,
command=self.text.yview
)
self.scrollbar_totwidth = int(self.scrollbar['width']) + 6 # width + padding
self.uses_scrollbar = 0
self.auto_scrollbar = 1
self.orgtextheight = int(self.text['height'])
padx = 0
pady = 0
self.tothead.pack(anchor=N+W, padx=padx, pady=pady)
self.rowhead.pack(anchor=N+W, padx=padx, pady=pady)
self.text.pack(side=LEFT,anchor=N+W, padx=padx, pady=pady)
self.tsframe.pack(anchor=N+W, padx=padx, pady=pady)
def setchdim(self):
self.text.update()
self.chdx = float(self.text.winfo_width()) / self.width
self.chdy = float(self.text.winfo_height()) / self.orgtextheight
self.chdx = int(round(self.chdx))
self.chdy = int(round(self.chdy))
self.pixwidth = self.width * self.chdx
        self.pixheight = self.orgtextheight * self.chdy
def putcolor(self, col):
if self.colorow.name == '<Total>':
text = col.align(' ')
color = '#e0e0e0'
else:
            color = self.graph.getkindcolor(self.colorow.name) # no trailing comma: a tuple would break the tag handling below
text = col.align('@')
self.text.insert('end', text, (color,))
self.text.tag_config(color,foreground=color, background='#e0e0e0',
font=('terminal', '12', 'bold'),)
def putcount(self, col):
self.valmode = 'Count'
count = self.colorow.count
self.cumulval += count
self.putval(col, count)
def putsize(self, col):
self.valmode = 'Size'
size = self.colorow.size
self.cumulval += size
self.putval(col, size)
def putval(self, col, val):
self.curval = val
self.ap(col.align(sizestring(val)))
def putpercent(self, col):
a = self.statbyname[col.idx]
if self.valmode == 'Count':
ref = a.count
elif self.valmode == 'Size':
ref = a.size
if ref:
ps = percentstring(self.curval * 100.0 / ref)
else:
ps = '---'
self.ap(col.align(ps))
def putdiff(self, col):
a, b = self.rowbyname[col.idx[0]], self.rowbyname[col.idx[1]]
if self.valmode == 'Count':
a, b = a.count, b.count
elif self.valmode == 'Size':
a, b = a.size, b.size
self.putval(col, a - b)
def putcumul(self, col):
self.putval(col, self.cumulval)
def putkind(self, col):
# Must be last!
import textwrap
wraplines = textwrap.wrap(self.colorow.name, width=col.width)
self.ap(col.align(wraplines[0]))
if len(wraplines) > 1:
initial = '\n'+' '*(self.widthbeforekind)
for line in wraplines[1:]:
self.ap(initial+col.align(line))
def setmode(self, mode, numkindrows):
self.mode = mode
self.numkindrows = numkindrows
self.mcontrols = self.graph.mcontrolbyname
self.stats = self.graph.stats
self.cols = [self.colspecs[x.strip()] for x in mode.split(' ') if x.strip()]
self.controlnames = {}
name = self.cols[0].idx
self.colorcontrol = self.mcontrols[name]
self.controlnames[name] = 1
self.controls = [self.colorcontrol]
self.lastidxs = [None]
for i, co in enumerate(self.cols):
idx = co.idx
if not isinstance(idx, (tuple, list)):
idx = (idx,)
for idx in idx:
if idx not in self.controlnames:
self.controls.append(self.mcontrols[idx])
self.controlnames[idx] = 1
self.lastidxs.append(None)
def setscrollbar(self, sb):
if sb == self.uses_scrollbar:
return
self.uses_scrollbar = sb
w = self.scrollbar_totwidth
if sb:
self.resize(-w, 0, setscrollbar=0)
self.scrollbar.pack(side=LEFT, fill=Y)
self.text['yscrollcommand'] = self.scrollbar.set
else:
self.resize(w, 0, setscrollbar=0)
self.scrollbar.pack_forget()
self.text['yscrollcommand'] = None
def update_simple(self, lsamp, rsamp):
t = self.text
t.delete('1.0', '100.0')
t.insert('1.0', str(rsamp.stat))
def update(self, force=0, setscrollbar=1):
stats = self.stats
idxs = [max(0, min(control.sample, len(stats)-1)) for control in self.controls]
if (idxs == self.lastidxs) and not force:
return
self.lastidxs = idxs
self.text['state'] = self.tothead['state'] = self.rowhead['state'] = NORMAL
self.text.delete('1.0', END)
self.tothead.delete('1.0', END)
self.rowhead.delete('1.0', END)
if not stats:
self.tothead.insert('end', '-- No Sample --')
self.text['state'] = self.tothead['state'] = self.rowhead['state'] = DISABLED
return
self.statbyname = {}
statbyidx = []
for i, control in enumerate(self.controls):
stat = stats[idxs[i]]
statbyidx.append(stat)
self.statbyname[control.name] = stat
samps = self.samps = [
Sample(self.mod, statbyidx[0], self.controls[0].marker.name, idxs[0],
numkindrows=self.numkindrows,
statype = self.graph.display.statype
)]
self.colorsamp = samps[0]
if len(self.controls) > 1:
samps.append(Sample(self.mod, statbyidx[1], self.controls[1].marker.name, idxs[1],
relative=samps[0]))
self.relsamp = samps[1]
t = self.tothead
n = max([len(str(samp.index)) for samp in samps])
for samp in samps:
t.insert('end', 'Sample %s: '%samp.name)
t.insert('end', ('%%%dd'%n)%samp.index, ('index',))
t.insert('end', ' at %s\n' % (samp.datetime))
t.tag_configure('index', background='#e0e0e0')
t = self.rowhead
self.sizes = [float(samp.stat.size) for samp in samps]
for col in self.cols:
t.insert('end', col.align(col.header), ('header',))
t.insert('end', '\n')
t = self.text
self.ap = lambda text:t.insert('end', text)
self.colorow = Row(samps[0].count, samps[0].size, '<Total>')
self.rowbyname = self.statbyname
self.cumulval = 0
for col in self.cols:
col.render(col)
self.ap('\n\n')
self.cumulval = 0
for i, a in enumerate(samps[0].rows):
self.colorow = a
if len(samps) > 1:
self.rowbyname = {
samps[0].name:a,
samps[1].name:samps[1].kindrows[a.name]
}
for col in self.cols:
col.render(col)
self.ap('\n')
if setscrollbar and self.auto_scrollbar:
numrows = int(self.text.index('end').split('.')[0])-2
h = int(self.text['height'])
needs_scrollbar = numrows > h
if needs_scrollbar != self.uses_scrollbar:
self.setscrollbar(needs_scrollbar)
self.text['state'] = self.tothead['state'] = self.rowhead['state'] = DISABLED
def resize(self, dx, dy, setscrollbar=1):
dx = int(dx)
oldwidth = self.pixwidth
newwidth = self.pixwidth + dx
if newwidth < self.chdx * 2:
newwidth = self.chdx * 2
self.pixwidth = newwidth
dx = newwidth - oldwidth
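        # Split the pixel width into whole character columns plus a pixel
        # remainder; the remainder is absorbed as horizontal padding.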
charwidth, extra = divmod(newwidth, self.chdx)
self.kindcol.width = max(charwidth - self.widthbeforekind - 1, self.minkindwidth)
self.totxresize += dx
for t in (self.tothead, self.rowhead, self.text):
t['width'] = charwidth
t['padx'] = self.minpadx + extra / 2
dy = int(dy)
rowresize, extra = divmod(self.totyresize + dy, self.chdy)
newheight = self.orgtextheight + rowresize
oldheight = int(self.text['height'])
if newheight < self.mintextheight:
newheight = self.mintextheight
dy = (newheight - oldheight) * self.chdy + extra
self.totyresize += dy
self.text['height'] = newheight
self.text['pady'] = self.textminpady + extra / 2
self.update(force=1, setscrollbar=1)
return dx, dy
class Filler:
def __init__(self, master):
self.frame = self.can = Canvas(
master,
#background='blue',
width=0,
height=0)
def getsize(self):
return int(self.can['width']),int(self.can['height']),
def setsize(self, w, h):
self.can.configure(
width = w,
height = h
)
def resize(self, dw, dh):
w, h = self.getsize()
self.setsize(max(0, w + dw), max(0, h + dh))
class Row:
def __init__(self, count, size, name):
self.count = count
self.size = size
self.name = name
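# A Sample either selects its own top-N kind rows (when numkindrows is
# given) or, in relative mode, mirrors the kind rows of another Sample so
# that two samples can be compared row for row.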
class Sample:
def __init__(self, mod, stat, name, index, numkindrows=None, statype='Size', relative=None):
self.stat = stat
self.size = stat.size
self.count = stat.count
self.name = name
self.index = index
self.datetime = mod.time.asctime(mod.time.localtime(stat.timemade))
self.kindrows = {}
if numkindrows is not None:
rows = stat.get_rows_n_and_other(numkindrows, statype)
for r in rows:
self.kindrows[r.name] = r
else:
kinds = []
oidx = None
for row in relative.rows:
if row.name == '<Other>':
oidx = len(kinds)
continue
else:
kinds.append(row.name)
rows = stat.get_rows_of_kinds(kinds)
size = 0
count = 0
for i, row in enumerate(rows):
kind = kinds[i]
if row is None:
row = Row(0, 0, kind)
self.kindrows[kind] = row
size += row.size
count += row.count
if oidx is not None:
other = Row(stat.count - count, stat.size - size, '<Other>')
rows[oidx:oidx] = [other]
self.kindrows['<Other>'] = other
self.rows = rows
class ProfileBrowser:
colors = ("red", "green", "blue", "yellow", "magenta", "cyan", 'white')
numkindrows = 10
def __init__(self, app, filename):
self.inited = 0
self.app = app
self.mod = mod = app.mod
self.master = master = app.root
if filename:
filename = mod.path.abspath(filename)
self.initialdir = mod.path.dirname(filename)
else:
self.initialdir = mod.os.getcwd()
self.frame = frame = Toplevel(
master,
#background='#bbb'
)
#frame['cursor'] = 'umbrella'
#frame.resizable(True,True)
self.menubar = Frame(self.frame, relief=RAISED, bd=2)
self.filebutton = Menubutton(self.menubar, text='File')
self.filemenu = Menu(self.filebutton)
self.filebutton['menu'] = self.filemenu
self.filemenu.add_command(label='New Profile Browser', command=self.cmd_new)
self.filemenu.add_command(label='Open Profile', command=self.cmd_open)
self.filemenu.add_command(label='Close Window', command=self.cmd_close)
self.filemenu.add_command(label='Clear Cache', command=self.cmd_clear_cache)
self.filemenu.add_command(label='Exit', command=self.cmd_exit)
self.panebutton = Menubutton(self.menubar, text='Pane')
self.panemenu = Menu(self.panebutton)
self.panebutton['menu'] = self.panemenu
choices = [
('Bars', 'Lines'),
('Size', 'Count'),
]
self.graphtypevar = StringVar()
self.graphbutton = self.modechooser(
self.menubar, 'Graph', choices,
self.graphtypevar, self.cmd_graphtype)
choices = [
('A', 'B'),
('Size', 'Count'),
('%A:Tot', '%B:Tot'),
('Cumul', 'A-B', 'B-A'),
('%A:Tot', '%B:Tot'),
('Kind',),
]
self.var_tablemode=StringVar()
self.tablebutton = Menubutton(self.menubar, text='Table')
self.tablemenu = Menu(self.tablebutton)
self.tablebutton['menu'] = self.tablemenu
self.headermenu = Menu(self.tablebutton, title='Table header')
self.addmodechooser(
self.headermenu,
choices,
self.var_tablemode,
self.cmd_tablemode
)
self.tablemenu.add_cascade(label='Header',menu=self.headermenu)
self.var_tablescrollbar = StringVar()
self.tablescrollbarmenu = Menu(self.tablebutton, title = 'Table scrollbar')
self.addmodechooser(
self.tablescrollbarmenu,
[('Auto', 'On', 'Off')],
self.var_tablescrollbar,
self.cmd_tablescrollbar
)
self.tablemenu.add_cascade(
label='Scrollbar',
menu = self.tablescrollbarmenu)
self.windowmenu = WindowMenu(self.menubar, self.app.var_window)
self.window = app.add_window_frame(self.frame, self.windowmenu)
self.helpbutton = Menubutton(self.menubar, text='Help')
self.helpmenu = Menu(self.helpbutton)
self.helpbutton['menu'] = self.helpmenu
self.helpmenu.add_command(label='About', command=self.cmd_about)
self.helpmenu.add_command(label='Help', command=self.cmd_help)
self.ctrlframe = Frame(
self.frame,
bd=2,
relief=GROOVE,
#background='#999',
)
self.exitbutton = Button(self.ctrlframe, text='Exit', command=self.cmd_exit,
background='red')
self.set_filename(filename)
self.id_collect = None
self.collecting = IntVar()
self.collecting.set(0)
self.collectbutton = Checkbutton(self.ctrlframe, text='Collect',
variable = self.collecting,
command=self.cmd_collect,
relief=RAISED)
self.stats = Stats(self.mod)
self.disptab = Frame(self.frame,
#relief=SUNKEN,
#bd=3
)
self.display = Display(self.disptab,
scale_table = AxisControl.scale_table,
numkindrows = self.numkindrows,
getkindcolor = self.getkindcolor,
)
self.xcontrol = AxisControl(self.ctrlframe,
name = 'X',
range = self.display.xrange,
grid = self.display.xgrid,
unit = 'samples',
rangecommand = self.display.setxrange,
gridcommand = self.display.setxgrid
)
self.ycontrol = AxisControl(self.ctrlframe,
name = 'Y',
range = self.display.yrange,
grid = self.display.ygrid,
unit = 'bytes',
rangecommand = self.display.setyrange,
gridcommand = self.display.setygrid,
autocommand = self.display.cmd_yrange_auto
)
self.display.xcontrol = self.xcontrol
self.display.ycontrol = self.ycontrol
self.mcontrols = []
self.mcontrolbyname = {}
for name in ('A', 'B'):
marker = self.display.new_xmarker(name)
control = MarkerControl(self.ctrlframe, marker, self.update_tableframe)
marker.set_poscommand(control.setsample)
self.mcontrols.append(control)
self.mcontrolbyname[name] = control
if 0:
self.optionsmenu.add_checkbutton(
label='X grid',
variable = self.display.var_xgrid,
command = self.display.cmd_xgrid)
self.optionsmenu.add_checkbutton(
label='Y grid',
variable = self.display.var_ygrid,
command = self.display.cmd_ygrid)
self.var_showcontrol=BooleanVar()
self.var_showcontrol.set(1)
self.panemenu.add_checkbutton(
label='Show Control Panel',
variable = self.var_showcontrol,
command = self.cmd_showcontrol)
self.var_showgraph=BooleanVar()
self.var_showgraph.set(1)
self.panemenu.add_checkbutton(
label='Show Graph',
variable = self.var_showgraph,
command = self.cmd_showgraph)
self.var_showtable=BooleanVar()
self.var_showtable.set(1)
self.panemenu.add_checkbutton(
label='Show Table',
variable = self.var_showtable,
command = self.cmd_showtable)
tf = self.tf = TableFrame(self, self.disptab)
d_t = self.d_t = PaneDiv(self.disptab, movecommand=self.cmd_dt_moved)
if 0:
self.ycontrol.frame.pack(side=LEFT, padx=3,pady=3)
self.xcontrol.frame.pack(side=LEFT, padx=3,pady=3)
self.scontrol.frame.pack(side=LEFT, padx=3, pady=3)
self.graphtypeframe.pack(side=LEFT, padx=3,pady=3)
self.collectbutton.pack(side=LEFT, padx=3,pady=3)
else:
self.xcontrol.frame.grid(row=0,column=0, padx=3,pady=3, sticky=W)
self.ycontrol.frame.grid(row=1,column=0, padx=3,pady=3)
self.mcontrols[0].frame.grid(row=0,column=1, columnspan=1,sticky=W,padx=3,pady=3)
self.mcontrols[1].frame.grid(row=1,column=1, columnspan=1,sticky=W,padx=3,pady=3)
self.exitbutton.grid(row=0,column=2, padx=3,pady=3)
self.collectbutton.grid(row=0,column=3, padx=3,pady=3)
self.filler = Filler(self.frame)
if 1:
self.filebutton.pack(side=LEFT)
self.panebutton.pack(side=LEFT)
self.graphbutton.pack(side=LEFT)
self.tablebutton.pack(side=LEFT)
self.windowmenu.button.pack(side=LEFT)
self.helpbutton.pack(side=LEFT)
self.menubar.grid(column=0,columnspan=4, sticky=N+W+E)
self.gridmain()
if 0:
self.display.frame.grid(row = 0, column = 0, sticky=N+W, padx=3,pady=3)
tf.frame.grid(row=0, column=1, sticky=S+E, padx=3,pady=3)
self.ctrlframe.grid(row=1,column=0, columnspan=2, sticky=W)
frame.bind('<Map>', self.event_map)
self.tf.setmode(self.var_tablemode.get(), self.numkindrows)
self.load_filename(filename)
d_t.frame.update_idletasks()
d_t.setheight(max(self.display.frame.winfo_height(),
tf.frame.winfo_height()))
d_t.frame.update_idletasks()
self.minsize = (500,400)
self.maxsize = (self.frame.winfo_screenwidth(), self.frame.winfo_screenheight())
        minsizes = { # note: not referenced anywhere below
# (ctrl, disp, tab) : (width, height)
(0,0,0): (270, 25),
(1,0,0): (363, 61),
(0,1,0): (270, 131),
(1,1,0): (270, 131),
}
self.setusergeometry()
def initfinal():
self.tf.setchdim()
rx = self.frame.winfo_rootx() + self.frame.winfo_width()
self.tf_wanted_margin = rx - (self.tf.frame.winfo_rootx() + self.tf.frame.winfo_width())
self.lastw = self.frame.winfo_width()
self.lasth = self.frame.winfo_height()
self.in_configure = 0
frame.bind('<Configure>', self.event_configure)
self.inited = 1
initfinal()
#self.frame.after_idle(initfinal)
def cmd_about(self):
self.cmd_help('about')
def cmd_help(self, pickname='help'):
os = self.mod.os
ocursor = self.frame.winfo_toplevel()['cursor']
try:
self.frame.winfo_toplevel()['cursor'] = 'watch'
self.frame.update()
m = self.mod.Text.gsltextviewer(
self.frame,
inpickle = getattr(self.mod.pbhelp, pickname)
#htmloutfile='/tmp/x.html',
)
self.app.add_window_frame(m)
finally:
self.frame.winfo_toplevel()['cursor'] = ocursor
def cmd_clear_cache(self):
self.stats.clear_cache()
def cmd_close(self):
self.frame.destroy()
def cmd_collect(self, *args):
#self.afterfunc()
#self.frame.after(1, self.afterfunc) # Turn on button first.??
if self.collecting.get():
self.event_collect()
else:
if self.id_collect is not None:
self.frame.after_cancel(self.id_collect)
self.id_collect = None
def event_collect(self):
o, n = self.stats.collect()
if n:
if o != self.display.numstats:
self.display.load_stats(self.stats)
else:
st = self.stats[-n:]
self.display.add_stats(st)
for c in self.mcontrols:
c.setnumsamples(len(self.stats))
self.id_collect = self.frame.after(1000, self.event_collect)
def cmd_dt_moved(self, dx):
# The division between display and table panes moved.
# Disable configure event handling while we are resizing.
self.in_configure += 1
# Right x position of enclosing frame
rx = self.frame.winfo_rootx() + self.frame.winfo_width()
# Right margin between pane divider and enclosing window
mx = rx - (self.d_t.frame.winfo_rootx() + self.d_t.frame.winfo_width())
# Don't move pane divider outside window
dx = min(dx, mx)
# Right margin between table and enclosing window
# before resizing
mx = rx - (self.tf.frame.winfo_rootx() + self.tf.frame.winfo_width())
dx, _ = self.display.resize(dx, 0)
wanted_margin = self.tf_wanted_margin
# After move
mx -= dx
self.tf.resize(mx - wanted_margin, 0)
self.display.moveback()
self.in_configure -= 1
def cmd_exit(self):
self.app.exit()
def cmd_graphtype(self):
self.display.setgraphtype(self.graphtypevar.get(), self.stats)
self.cmd_tablemode()
def cmd_new(self):
self.app.new_profile_browser(self.filename)
def cmd_open(self):
op = tkFileDialog.Open(self.frame,
# ? Should we have default extension or not??
# defaultextension='.hpy',
initialdir = self.initialdir,
filetypes=[('Heapy data files','.hpy'),
('All files', '*')
]
)
filename = op.show()
if filename:
self.load_filename(filename)
def cmd_showcontrol(self):
self.grid_things()
def cmd_showgraph(self):
if self.var_showgraph.get() and self.var_showtable.get():
self.tf.resize(-self.tf.totxresize, 0)
self.display.resize(self.display.orgwidth - self.display.botx, 0)
self.display.moveback()
self.grid_things()
cmd_showtable = cmd_showgraph
def cmd_tablemode(self):
self.tf.setmode(self.var_tablemode.get(), self.numkindrows)
self.tf.update()
def cmd_tablescrollbar(self):
tf = self.tf
s = self.var_tablescrollbar.get()
if s == 'Auto':
tf.auto_scrollbar = 1
tf.update(force=1, setscrollbar=1)
elif s == 'On':
tf.auto_scrollbar = 0
tf.setscrollbar(1)
elif s == 'Off':
tf.auto_scrollbar = 0
tf.setscrollbar(0)
else:
assert 0
def setusergeometry(self):
# Make the geometry of the window be user-specified
# This is called after Tk has determined the size
# of the window needed for the initial widget configuration.
# The size is not to be changed after that, other than
# on user request.
# I couldn't just do frame.geometry(frame.geometry()) because,
# presumably, of a bug in the Tk and/or wm I am using. I hope
# this works for all systems .. Notes 26 Oct 2005.
self.frame.update()
g = '%dx%d+%d+%d'%(
self.frame.winfo_width(),
self.frame.winfo_height(),
self.frame.winfo_rootx(),
self.frame.winfo_rooty())
self.frame.geometry(g)
def modechooser(self, frame, name, choices, cmdvar, command):
button = Menubutton(frame, text=name)
menu = Menu(button)
button['menu'] = menu
self.addmodechooser(menu, choices, cmdvar, command)
return button
def addmodechooser(self, menu, choices, cmdvar, command):
def setcmdvar():
cmdvar.set(' '.join([v.get() for v in vars]))
def cmd():
setcmdvar()
command()
vars = []
for ch in choices:
var = StringVar()
vars.append(var)
var.set(ch[0])
for a in ch:
menu.add_radiobutton(
command = cmd,
label = a,
value=a,
variable=var,
#font=('Courier','12', 'bold'),
#font=('Helvetica','12', 'bold'),
columnbreak = (a == ch[0])
)
setcmdvar()
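    # addmodechooser builds one radiobutton column per choice group; the
    # selected values are joined into a single space-separated mode string,
    # e.g. 'Bars Size' for the graph or 'A Size %A:Tot Cumul %A:Tot Kind'
    # for the table header.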
def grid_things(self):
ow = self.frame.winfo_width()
oh = self.frame.winfo_height()
self.ctrlframe.grid_forget()
self.display.frame.grid_forget()
self.d_t.frame.grid_forget()
self.tf.frame.grid_forget()
self.disptab.grid_forget()
self.filler.frame.grid_forget()
self.gridmain()
self.frame.update_idletasks()
self.sizewidgets()
def gridmain(self):
row = 1
c = self.var_showcontrol.get()
if c:
self.ctrlframe.grid(row=row,column=0, columnspan=3, padx=3,pady=3,sticky=W)
row += 1
column = 0
g = self.var_showgraph.get()
t = self.var_showtable.get()
gt = (g, t)
if g:
self.display.frame.grid(row=0, column = column, sticky=N+W,
padx=3,pady=3
)
column += 1
if g and t:
self.d_t.frame.grid(row=0, column=column, sticky=N+W)
column += 1
if t:
self.tf.frame.grid(row=0, column=column, sticky=N+W
, padx=3,pady=3
)
if g or t:
self.disptab.grid(row=row, column=0,
sticky=N+W,
#padx=3,pady=3,
)
row += 1
self.filler.setsize(0, 0)
self.filler.frame.grid(row=row,column=3, sticky=N+W)
if 0 and not (g or t):
self.frame.resizable(0,0)
else:
self.frame.resizable(1,1)
def event_configure(self, event):
if event.widget is not self.frame:
return
if not self.inited:
return
if self.in_configure:
return
curw = self.frame.winfo_width()
curh = self.frame.winfo_height()
if curw == self.lastw and curh == self.lasth:
return
self.in_configure += 1
self.lastw = curw
self.lasth = curh
self.sizewidgets()
self.in_configure -= 1
def sizewidgets(self):
self.frame.update()
curw = self.frame.winfo_width()
curh = self.frame.winfo_height()
mbx = self.menubar.winfo_rootx()
mby = self.menubar.winfo_rooty()
sfs = []
if self.var_showgraph.get():
sfs.append(self.display)
if self.var_showtable.get():
sfs.append(self.tf)
if not sfs:
sfs.append(self.filler)
dys = {}
didh = 0
for sf in sfs:
f = sf.frame
diy = f.winfo_rooty()
dih = f.winfo_height()
ch = diy - mby + dih
dy = curh - ch - 7
didh = didh or dy
dys[sf] = dy
if self.var_showtable.get():
f = self.tf.frame
elif self.var_showgraph.get():
f = self.display.frame
else:
f = self.filler.frame
fx = f.winfo_rootx()
fw = f.winfo_width()
cw = fx - mbx + fw
fdw = curw - cw - 6
if f is self.filler.frame and not self.var_showcontrol.get():
fdw = curw - self.filler.getsize()[0] - 3
if didh or fdw:
if self.var_showgraph.get() and self.var_showtable.get():
dprop = float(self.display.frame.winfo_width())
dprop = dprop / (dprop + self.tf.frame.winfo_width())
dx, dy = self.display.resize(fdw * dprop, dys[self.display])
self.tf.resize(fdw - dx, dys[self.tf])
self.frame.update_idletasks()
self.d_t.setheight(max(self.display.frame.winfo_height(),
self.tf.frame.winfo_height()))
elif self.var_showgraph.get():
self.display.resize(fdw, dys[self.display])
elif self.var_showtable.get():
self.tf.resize(fdw, dys[self.tf])
else:
self.filler.resize(fdw, dys[self.filler])
self.filler.setsize(self.filler.getsize()[0],1000)
if self.var_showgraph.get():
self.display.moveback()
#self.resize(dw, dh)
def resize(self, dw, dh):
self.display.resize(dw, dh)
#self.frame.wm_geometry('')
def event_map(self, event):
self.frame.unbind('<Map>')
self.frame.bind('<Unmap>', self.event_unmap)
self.frame.lift()
def event_unmap(self, event):
self.frame.unbind('<Unmap>')
self.frame.bind('<Map>', self.event_map)
def load_filename(self, filename):
ocursor = self.frame.winfo_toplevel()['cursor']
try:
self.frame.winfo_toplevel()['cursor'] = 'watch'
self.frame.update()
if filename:
filename = self.mod.path.abspath(filename)
try:
self.stats.open(filename)
except:
etype, value, tb = self.mod._root.sys.exc_info()
tkMessageBox.showerror(
master=self.frame,
message = (
"Error when loading\n%r:\n"%filename+
"%s"%''.join(self.mod._root.traceback.format_exception_only(
etype, value)))
)
else:
self.display.load_stats(self.stats)
for c in self.mcontrols:
c.setnumsamples(len(self.stats))
#self.scontrol.trackcommand(1)
self.set_filename(filename)
self.xrange_fit()
self.display.xview_moveto(0)
self.mcontrols[1].settracking(0)
self.mcontrols[0].settracking(1)
self.yrange_fit()
self.tf.update(force=1)
if filename:
self.initialdir = self.mod.path.dirname(filename)
finally:
self.frame.winfo_toplevel()['cursor'] = ocursor
def update_tableframe(self):
self.tf.update()
def getkindcolor(self, kind):
if kind == '<Other>':
return 'black'
else:
return self.colors[abs(hash(kind))%len(self.colors)]
def set_filename(self, filename):
self.filename = filename
if not filename:
filename = '<No File>'
title = 'Heapy Profile Browser: %s'%filename
self.window.title(title)
    def setnormpos(self):
        # Appears to be a leftover: it references Display attributes
        # (setscrollregion, ymax, xi0) that ProfileBrowser itself lacks.
self.setscrollregion()
if self.ymax >= self.yrange:
self.yrange_fit()
if self.xi0 is None:
self.drawxaxis()
else:
self.updatexaxis()
self.track()
def redraw_all(self):
pass
def trackoff(self):
self.rcontrol.settracking(0)
def xrange_fit(self):
self.xcontrol.fit(len(self.stats))
def yrange_fit(self):
self.display.yrange_auto(force=1)
class _GLUECLAMP_:
_imports_ = (
'_parent:Use',
'_parent:pbhelp',
'_root.guppy.etc:textView',
'_root.guppy:specs',
'_root:md5',
'_root:os',
'_root.os:path',
'_root:time',
'_root.guppy.gsl:Text',
)
def pb(self, filename=None):
"""pb( [filename: profilefilename+])
Create a Profile Browser window.
Argument
filename: profilefilename+
The name of a file containing profile data.
See also
Heapy Profile Browser[1]
Screenshot[2]
References
[0] heapy_Use.html#heapykinds.Use.pb
[1] ProfileBrowser.html
[2] pbscreen.jpg"""
pa = ProfileApp(self)
pa.new_profile_browser(filename)
pa.mainloop()
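# Example session (file name is hypothetical; assumes the usual guppy entry point):
#
#     from guppy import hpy
#     h = hpy()
#     h.heap().stat.dump('/tmp/demo.hpy')   # collect and save a sample
#     h.pb('/tmp/demo.hpy')                 # browse it interactively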
def tpg(self):
self('/tmp/x.hpy')
| apache-2.0 | -1,603,949,223,153,726,500 | 22.952215 | 96 | 0.641441 | false |
jp-bpl/configuration | util/vpc-tools/tag-old-ebs.py | 62 | 7679 | """
For a given AWS account, go through all unattached EBS volumes and tag them.
"""
import boto
import boto.utils
import argparse
import logging
import subprocess
import time
import os
from os.path import join, exists, isdir, islink, realpath, basename, dirname
import yaml
# needs to be pip installed
import netaddr
LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
TIMEOUT = 300
log_level = logging.INFO
def tags_for_hostname(hostname, mapping):
logging.debug("Hostname is {}".format(hostname))
if not hostname.startswith('ip-'):
return {}
# Slice off the 'ip-' prefix; str.lstrip('ip-') strips characters, not a prefix.
octets = hostname[len('ip-'):].split('-')
tags = {}
# Update with env and deployment info
tags.update(mapping['CIDR_SECOND_OCTET'][octets[1]])
ip_addr = netaddr.IPAddress(".".join(octets))
for key, value in mapping['CIDR_REST'].items():
cidr = ".".join([
mapping['CIDR_FIRST_OCTET'],
octets[1],
key])
cidrset = netaddr.IPSet([cidr])
if ip_addr in cidrset:
tags.update(value)
return tags
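# Illustration (hypothetical mapping): with CIDR_SECOND_OCTET['0'] ==
# {'environment': 'prod'}, tags_for_hostname('ip-10-0-1-5', mapping) returns
# {'environment': 'prod'} plus the tags of any CIDR_REST rule matching 10.0.1.5.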
def potential_devices(root_device):
device_dir = dirname(root_device)
relevant_devices = lambda x: x.startswith(basename(root_device))
all_devices = os.listdir(device_dir)
all_devices = filter(relevant_devices, all_devices)
logging.info("Potential devices on {}: {}".format(root_device, all_devices))
if len(all_devices) > 1:
all_devices.remove(basename(root_device))
return map(lambda x: join(device_dir, x), all_devices)
def get_tags_for_disk(mountpoint):
tag_data = {}
# Look at some files on it to determine:
# - hostname
# - environment
# - deployment
# - cluster
# - instance-id
# - date created
hostname_file = join(mountpoint, "etc", "hostname")
edx_dir = join(mountpoint, 'edx', 'app')
if exists(hostname_file):
# This means this was a root volume.
with open(hostname_file, 'r') as f:
hostname = f.readline().strip()
tag_data['hostname'] = hostname
if exists(edx_dir) and isdir(edx_dir):
# This is an ansible related ami, we'll try to map
# the hostname to a knows deployment and cluster.
cluster_tags = tags_for_hostname(hostname, mappings)
tag_data.update(cluster_tags)
else:
# Not an ansible created root volume.
tag_data['cluster'] = 'unknown'
else:
# Not a root volume
tag_data['cluster'] = "unknown"
instance_file = join(mountpoint, "var", "lib", "cloud", "instance")
if exists(instance_file) and islink(instance_file):
resolved_path = realpath(instance_file)
old_instance_id = basename(resolved_path)
tag_data['instance-id'] = old_instance_id
return tag_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag unattached ebs volumes.")
parser.add_argument("--profile", '-p',
help="AWS Profile to use with boto.")
parser.add_argument("--noop", "-n", action="store_true",
help="Don't actually tag anything.")
parser.add_argument("--verbose", "-v", action="store_true",
help="More verbose output.")
parser.add_argument("--device", "-d", default="/dev/xvdf",
help="The /dev/??? where the volume should be mounted.")
parser.add_argument("--mountpoint", "-m", default="/mnt",
help="Location to mount the new device.")
parser.add_argument("--config", "-c", required=True,
help="Configuration to map hostnames to tags.")
# The config should specify what tags to associate with the second
# octet and with CIDR ranges over the remaining octets of the hostname,
# which encodes the instance's ip address.
# example:
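# (illustrative values only; real octets and tags will differ)
#
#   CIDR_FIRST_OCTET: "10"
#   CIDR_SECOND_OCTET:
#     "0": {environment: prod, deployment: edx}
#   CIDR_REST:
#     "0.0/20": {cluster: edxapp}
#     "16.0/20": {cluster: worker}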
args = parser.parse_args()
mappings = yaml.safe_load(open(args.config,'r'))
# Setup Logging
if args.verbose:
log_level = logging.DEBUG
logging.basicConfig(format=LOG_FORMAT, level=log_level)
# setup boto
ec2 = boto.connect_ec2(profile_name=args.profile)
# get mounting args
id_info = boto.utils.get_instance_identity()['document']
instance_id = id_info['instanceId']
az = id_info['availabilityZone']
root_device = args.device
mountpoint = args.mountpoint
# Find all unattached volumes
filters = { "status": "available", "availability-zone": az }
potential_volumes = ec2.get_all_volumes(filters=filters)
logging.debug("Found {} unattached volumes in {}".format(len(potential_volumes), az))
for vol in potential_volumes:
if "cluster" in vol.tags:
continue
# Attach volume to the instance running this process
logging.debug("Trying to attach {} to {} at {}".format(
vol.id, instance_id, root_device))
try:
ec2.attach_volume(vol.id, instance_id, root_device)
# Wait for the volume to finish attaching.
waiting_msg = "Waiting for {} to be available at {}"
timeout = TIMEOUT
while not exists(root_device):
time.sleep(2)
logging.debug(waiting_msg.format(vol.id, root_device))
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while attaching {}.".format(vol.id))
exit(1)
# Because a volume might have multiple mount points
devices_on_volume = potential_devices(root_device)
if len(devices_on_volume) != 1:
vol.add_tag("devices_on_volume", str(devices_on_volume))
# Don't tag in this case because the different devices
# may have conflicting tags.
logging.info("Skipping {} because it has multiple mountpoints.".format(vol.id))
logging.info("{} has mountpoints {}".format(vol.id, str(devices_on_volume)))
else:
device = devices_on_volume[0]
try:
# Mount the volume
subprocess.check_call(["sudo", "mount", device, mountpoint])
# Learn all tags we can know from content on disk.
tag_data = get_tags_for_disk(mountpoint)
tag_data['created'] = vol.create_time
# If they are found tag the instance with them
if args.noop:
logging.info("Would have tagged {} with: \n{}".format(vol.id, str(tag_data)))
else:
logging.info("Tagging {} with: \n{}".format(vol.id, str(tag_data)))
vol.add_tags(tag_data)
finally:
# Un-mount the volume
subprocess.check_call(['sudo', 'umount', mountpoint])
finally:
# Need this to be a function so we always re-check the API for status.
is_attached = lambda vol_id: ec2.get_all_volumes([vol_id])[0].status != "available" # boto expects a list of ids
timeout = TIMEOUT
while exists(root_device) or is_attached(vol.id):
if is_attached(vol.id):
try:
# detach the volume
ec2.detach_volume(vol.id)
except boto.exception.EC2ResponseError as e:
logging.warning("Failed to detach volume. Will try again in a bit.")
time.sleep(2)
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while detaching {}.".format(vol.id))
exit(1)
logging.debug("Waiting for {} to be detached.".format(vol.id))
| agpl-3.0 | 8,793,607,458,759,327,000 | 36.096618 | 101 | 0.582367 | false |
lucidfrontier45/scikit-learn | examples/covariance/plot_covariance_estimation.py | 2 | 4991 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
The usual estimator for covariance is the maximum likelihood estimator,
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotical optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimum and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is by far the most
computationally costly of the three.
"""
print __doc__
import numpy as np
import pylab as pl
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
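# The fitted shrinkage coefficients themselves are available as
# lw.shrinkage_ and oa.shrinkage_ (exact values depend on the random draw).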
###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularization parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
pl.plot(pl.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
pl.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
pl.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
pl.ylim(ymin, ymax)
pl.xlim(xmin, xmax)
pl.legend()
pl.show()
| bsd-3-clause | -3,581,773,570,626,907,000 | 37.392308 | 79 | 0.686836 | false |
sorgerlab/bioagents | bioagents/tests/test_model_diagnoser.py | 2 | 2836 | from indra.statements import *
from bioagents.mra.model_diagnoser import ModelDiagnoser
from indra.assemblers.pysb import PysbAssembler
from nose.plugins.attrib import attr
drug = Agent('PLX4720')
raf = Agent('RAF', db_refs={'FPLX': 'RAF'})
mek = Agent('MEK', db_refs={'FPLX': 'MEK'})
erk = Agent('ERK', db_refs={'FPLX': 'ERK'})
def test_missing_activity1():
stmts = [Activation(raf, mek), Phosphorylation(mek, erk)]
md = ModelDiagnoser(stmts)
suggs = md.get_missing_activities()
assert len(suggs) == 1
assert suggs[0].enz.name == 'MEK'
assert suggs[0].enz.activity
assert suggs[0].enz.activity.activity_type == 'activity'
def test_missing_activity2():
stmts = [Inhibition(drug, raf), Activation(raf, mek)]
md = ModelDiagnoser(stmts)
suggs = md.get_missing_activities()
assert len(suggs) == 1
assert suggs[0].subj.name == 'RAF'
assert suggs[0].subj.activity
assert suggs[0].subj.activity.activity_type == 'activity'
def test_missing_activity3():
stmts = [Activation(raf, mek), Activation(raf, erk)]
md = ModelDiagnoser(stmts)
suggs = md.get_missing_activities()
assert len(suggs) == 0
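# No suggestions expected here: RAF acts downstream but is never itself
# activated upstream, so there is no activity condition to infer.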
def test_check_model():
explain = Activation(raf, erk)
mek_active = Agent('MEK', db_refs={'FPLX': 'MEK'},
activity=ActivityCondition('activity', True))
model_stmts = [Activation(raf, mek), Activation(mek_active, erk)]
# Build the pysb model
pa = PysbAssembler()
pa.add_statements(model_stmts)
pa.make_model(policies='one_step')
md = ModelDiagnoser(model_stmts, pa.model, explain)
result = md.check_explanation()
assert result['has_explanation'] is True
path = result['explanation_path']
assert len(path) == 2
assert path[0] == model_stmts[0]
assert path[1] == model_stmts[1]
@attr('nonpublic')
def test_propose_statement():
jun = Agent('JUN', db_refs={'HGNC':'6204', 'UP': 'P05412'})
explain = Activation(raf, jun)
erk_active = Agent('ERK', db_refs={'FPLX': 'ERK'},
activity=ActivityCondition('activity', True))
# Leave out MEK activates ERK
model_stmts = [Activation(raf, mek), Activation(erk_active, jun)]
# Build the pysb model
pa = PysbAssembler()
pa.add_statements(model_stmts)
pa.make_model(policies='one_step')
md = ModelDiagnoser(model_stmts, pa.model, explain)
result = md.check_explanation()
assert result['has_explanation'] is False
assert result.get('explanation_path') is None
inf_prop = result.get('connect_rules')
assert inf_prop == ('RAF_activates_MEK_activity',
'ERK_act_activates_JUN_activity'), inf_prop
stmt_prop = result.get('connect_stmts')
assert stmt_prop == (model_stmts[0], model_stmts[1])
stmt_suggestions = md.suggest_statements(*stmt_prop)
| bsd-2-clause | -759,491,562,513,554,800 | 34.898734 | 69 | 0.654795 | false |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/idlelib/CallTips.py | 43 | 7941 | """CallTips.py - An IDLE Extension to Jog Your Memory
Call Tips are floating windows which display function, class, and method
parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
import re
import sys
import types
from idlelib import CallTipWindow
from idlelib.HyperParser import HyperParser
import __main__
class CallTips:
menudefs = [
('edit', [
("Show call tip", "<<force-open-calltip>>"),
])
]
def __init__(self, editwin=None):
if editwin is None: # subprocess and test
self.editwin = None
return
self.editwin = editwin
self.text = editwin.text
self.calltip = None
self._make_calltip_window = self._make_tk_calltip_window
def close(self):
self._make_calltip_window = None
def _make_tk_calltip_window(self):
# See __init__ for usage
return CallTipWindow.CallTip(self.text)
def _remove_calltip_window(self, event=None):
if self.calltip:
self.calltip.hidetip()
self.calltip = None
def force_open_calltip_event(self, event):
"""Happens when the user really wants to open a CallTip, even if a
function call is needed.
"""
self.open_calltip(True)
def try_open_calltip_event(self, event):
"""Happens when it would be nice to open a CallTip, but not really
necessary, for example after an opening bracket, so function calls
won't be made.
"""
self.open_calltip(False)
def refresh_calltip_event(self, event):
"""If there is already a calltip window, check if it is still needed,
and if so, reload it.
"""
if self.calltip and self.calltip.is_active():
self.open_calltip(False)
def open_calltip(self, evalfuncs):
self._remove_calltip_window()
hp = HyperParser(self.editwin, "insert")
sur_paren = hp.get_surrounding_brackets('(')
if not sur_paren:
return
hp.set_index(sur_paren[0])
expression = hp.get_expression()
if not expression or (not evalfuncs and expression.find('(') != -1):
return
arg_text = self.fetch_tip(expression)
if not arg_text:
return
self.calltip = self._make_calltip_window()
self.calltip.showtip(arg_text, sur_paren[0], sur_paren[1])
def fetch_tip(self, expression):
"""Return the argument list and docstring of a function or class
If there is a Python subprocess, get the calltip there. Otherwise,
either fetch_tip() is running in the subprocess itself or it was called
in an IDLE EditorWindow before any script had been run.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
To find methods, fetch_tip must be fed a fully qualified name.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except AttributeError:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_calltip",
(expression,), {})
else:
entity = self.get_entity(expression)
return get_arg_text(entity)
def get_entity(self, expression):
"""Return the object corresponding to expression evaluated
in a namespace spanning sys.modules and __main.dict__.
"""
if expression:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
try:
return eval(expression, namespace)
except BaseException:
# An uncaught exception closes idle, and eval can raise any
# exception, especially if user classes are involved.
return None
def _find_constructor(class_ob):
# Given a class object, return a function object used for the
# constructor (ie, __init__() ) or None if we can't find one.
try:
return class_ob.__init__.im_func
except AttributeError:
for base in class_ob.__bases__:
rc = _find_constructor(base)
if rc is not None: return rc
return None
def get_arg_text(ob):
"""Get a string describing the arguments for the given object,
only if it is callable."""
arg_text = ""
if ob is not None and hasattr(ob, '__call__'):
arg_offset = 0
if type(ob) in (types.ClassType, types.TypeType):
# Look for the highest __init__ in the class chain.
fob = _find_constructor(ob)
if fob is None:
fob = lambda: None
else:
arg_offset = 1
elif type(ob)==types.MethodType:
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
fob = ob.im_func
arg_offset = 1
else:
fob = ob
# Try to build one for Python defined functions
if type(fob) in [types.FunctionType, types.LambdaType]:
argcount = fob.func_code.co_argcount
real_args = fob.func_code.co_varnames[arg_offset:argcount]
defaults = fob.func_defaults or []
defaults = list(map(lambda name: "=%s" % repr(name), defaults))
defaults = [""] * (len(real_args) - len(defaults)) + defaults
items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
if fob.func_code.co_flags & 0x4:
items.append("...")
if fob.func_code.co_flags & 0x8:
items.append("***")
arg_text = ", ".join(items)
arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
# See if we can use the docstring
doc = getattr(ob, "__doc__", "")
if doc:
doc = doc.lstrip()
pos = doc.find("\n")
if pos < 0 or pos > 70:
pos = 70
if arg_text:
arg_text += "\n"
arg_text += doc[:pos]
return arg_text
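# For illustration (hypothetical input): given "def f(a, b=None, *args)" with
# docstring "does f", get_arg_text(f) yields "(a, b=None, ...)\ndoes f".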
#################################################
#
# Test code
#
if __name__=='__main__':
def t1(): "()"
def t2(a, b=None): "(a, b=None)"
def t3(a, *args): "(a, ...)"
def t4(*args): "(...)"
def t5(a, *args): "(a, ...)"
def t6(a, b=None, *args, **kw): "(a, b=None, ..., ***)"
def t7((a, b), c, (d, e)): "(<tuple>, c, <tuple>)"
class TC(object):
"(ai=None, ...)"
def __init__(self, ai=None, *b): "(ai=None, ...)"
def t1(self): "()"
def t2(self, ai, b=None): "(ai, b=None)"
def t3(self, ai, *args): "(ai, ...)"
def t4(self, *args): "(...)"
def t5(self, ai, *args): "(ai, ...)"
def t6(self, ai, b=None, *args, **kw): "(ai, b=None, ..., ***)"
def t7(self, (ai, b), c, (d, e)): "(<tuple>, c, <tuple>)"
def test(tests):
ct = CallTips()
failed=[]
for t in tests:
expected = t.__doc__ + "\n" + t.__doc__
name = t.__name__
# exercise fetch_tip(), not just get_arg_text()
try:
qualified_name = "%s.%s" % (t.im_class.__name__, name)
except AttributeError:
qualified_name = name
arg_text = ct.fetch_tip(qualified_name)
if arg_text != expected:
failed.append(t)
fmt = "%s - expected %s, but got %s"
print fmt % (t.__name__, expected, get_arg_text(t))
print "%d of %d tests failed" % (len(failed), len(tests))
tc = TC()
tests = (t1, t2, t3, t4, t5, t6, t7,
TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6, tc.t7)
test(tests)
| apache-2.0 | 266,445,088,186,638,430 | 34.137168 | 79 | 0.541619 | false |
felixma/nova | nova/api/openstack/compute/schemas/fixed_ips.py | 79 | 1027 | # Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
reserve = {
'type': 'object',
'properties': {
'reserve': parameter_types.none,
},
'required': ['reserve'],
'additionalProperties': False,
}
unreserve = {
'type': 'object',
'properties': {
'unreserve': parameter_types.none,
},
'required': ['unreserve'],
'additionalProperties': False,
}
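# For illustration, request bodies matching these schemas look like
# {"reserve": null} or {"unreserve": null}; parameter_types.none admits
# the null-style values Nova accepts for these actions.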
| apache-2.0 | -8,297,483,588,647,857,000 | 27.527778 | 78 | 0.674781 | false |
apache/airflow | airflow/providers/google/cloud/example_dags/example_translate_speech.py | 3 | 3196 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow import models
from airflow.providers.google.cloud.operators.text_to_speech import CloudTextToSpeechSynthesizeOperator
from airflow.providers.google.cloud.operators.translate_speech import CloudTranslateSpeechOperator
from airflow.utils import dates
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
BUCKET_NAME = os.environ.get("GCP_TRANSLATE_SPEECH_TEST_BUCKET", "INVALID BUCKET NAME")
# [START howto_operator_translate_speech_gcp_filename]
FILENAME = "gcp-speech-test-file"
# [END howto_operator_translate_speech_gcp_filename]
# [START howto_operator_text_to_speech_api_arguments]
INPUT = {"text": "Sample text for demo purposes"}
VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"}
AUDIO_CONFIG = {"audio_encoding": "LINEAR16"}
# [END howto_operator_text_to_speech_api_arguments]
# [START howto_operator_translate_speech_arguments]
CONFIG = {"encoding": "LINEAR16", "language_code": "en_US"}
AUDIO = {"uri": f"gs://{BUCKET_NAME}/{FILENAME}"}
TARGET_LANGUAGE = 'pl'
FORMAT = 'text'
MODEL = 'base'
SOURCE_LANGUAGE = None # type: None
# [END howto_operator_translate_speech_arguments]
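# A (hypothetical) round trip: the synthesize task below writes LINEAR16 audio
# for INPUT to gs://<bucket>/<filename>, and the translate tasks return the
# recognized transcript translated to Polish, e.g. a dict along the lines of
# {'translatedText': 'Przykladowy tekst do celow demonstracyjnych'}.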
with models.DAG(
"example_gcp_translate_speech",
schedule_interval=None, # Override to match your needs
start_date=dates.days_ago(1),
tags=['example'],
) as dag:
text_to_speech_synthesize_task = CloudTextToSpeechSynthesizeOperator(
project_id=GCP_PROJECT_ID,
input_data=INPUT,
voice=VOICE,
audio_config=AUDIO_CONFIG,
target_bucket_name=BUCKET_NAME,
target_filename=FILENAME,
task_id="text_to_speech_synthesize_task",
)
# [START howto_operator_translate_speech]
translate_speech_task = CloudTranslateSpeechOperator(
project_id=GCP_PROJECT_ID,
audio=AUDIO,
config=CONFIG,
target_language=TARGET_LANGUAGE,
format_=FORMAT,
source_language=SOURCE_LANGUAGE,
model=MODEL,
task_id='translate_speech_task',
)
translate_speech_task2 = CloudTranslateSpeechOperator(
audio=AUDIO,
config=CONFIG,
target_language=TARGET_LANGUAGE,
format_=FORMAT,
source_language=SOURCE_LANGUAGE,
model=MODEL,
task_id='translate_speech_task2',
)
# [END howto_operator_translate_speech]
text_to_speech_synthesize_task >> translate_speech_task >> translate_speech_task2
| apache-2.0 | 643,132,891,427,086,100 | 36.6 | 103 | 0.717772 | false |
fusionpig/ansible | v1/tests/TestSynchronize.py | 103 | 6958 |
import unittest
import getpass
import os
import shutil
import time
import tempfile
from nose.plugins.skip import SkipTest
from ansible.runner.action_plugins.synchronize import ActionModule as Synchronize
class FakeRunner(object):
def __init__(self):
self.connection = None
self.transport = None
self.basedir = None
self.sudo = None
self.remote_user = None
self.private_key_file = None
self.check = False
self.become = False
self.become_method = 'sudo'
self.become_user = False
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None,
persist_files=False, complex_args=None, delete_remote_tmp=True):
self.executed_conn = conn
self.executed_tmp = tmp
self.executed_module_name = module_name
self.executed_args = args
self.executed_async_jid = async_jid
self.executed_async_module = async_module
self.executed_async_limit = async_limit
self.executed_inject = inject
self.executed_persist_files = persist_files
self.executed_complex_args = complex_args
self.executed_delete_remote_tmp = delete_remote_tmp
def noop_on_check(self, inject):
return self.check
class FakeConn(object):
def __init__(self):
self.host = None
self.delegate = None
class TestSynchronize(unittest.TestCase):
def test_synchronize_action_basic(self):
""" verify the synchronize action plugin sets
the delegate to 127.0.0.1 and remote path to user@host:/path """
runner = FakeRunner()
runner.remote_user = "root"
runner.transport = "ssh"
conn = FakeConn()
inject = {
'inventory_hostname': "el6.lab.net",
'inventory_hostname_short': "el6",
'ansible_connection': None,
'ansible_ssh_user': 'root',
'delegate_to': None,
'playbook_dir': '.',
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert runner.executed_complex_args == {"dest":"[email protected]:/tmp/bar", "src":"/tmp/foo"}, "wrong args used"
assert runner.sudo == None, "sudo was not reset to None"
def test_synchronize_action_sudo(self):
""" verify the synchronize action plugin unsets and then sets sudo """
runner = FakeRunner()
runner.become = True
runner.remote_user = "root"
runner.transport = "ssh"
conn = FakeConn()
inject = {
'inventory_hostname': "el6.lab.net",
'inventory_hostname_short': "el6",
'ansible_connection': None,
'ansible_ssh_user': 'root',
'delegate_to': None,
'playbook_dir': '.',
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert runner.executed_complex_args == {'dest':'[email protected]:/tmp/bar',
'src':'/tmp/foo',
'rsync_path':'"sudo rsync"'}, "wrong args used"
assert runner.become == True, "sudo was not reset to True"
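# Note the nested quoting of rsync_path above: the value is passed through a
# remote shell, so "sudo rsync" must stay quoted inside the option string.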
def test_synchronize_action_local(self):
""" verify the synchronize action plugin sets
the delegate to 127.0.0.1 and does not alter the dest """
runner = FakeRunner()
runner.remote_user = "jtanner"
runner.transport = "paramiko"
conn = FakeConn()
conn.host = "127.0.0.1"
conn.delegate = "thishost"
inject = {
'inventory_hostname': "thishost",
'ansible_ssh_host': '127.0.0.1',
'ansible_connection': 'local',
'delegate_to': None,
'playbook_dir': '.',
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.transport == "paramiko", "runner transport was changed"
assert runner.remote_user == "jtanner", "runner remote_user was changed"
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert "dest_port" not in runner.executed_complex_args, "dest_port should not have been set"
assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
assert runner.executed_complex_args.get("dest") == "/tmp/bar", "dest was set incorrectly"
def test_synchronize_action_vagrant(self):
""" Verify the action plugin accommodates the common
scenarios for vagrant boxes. """
runner = FakeRunner()
runner.remote_user = "jtanner"
runner.transport = "ssh"
conn = FakeConn()
conn.host = "127.0.0.1"
conn.delegate = "thishost"
inject = {
'inventory_hostname': "thishost",
'ansible_ssh_user': 'vagrant',
'ansible_ssh_host': '127.0.0.1',
'ansible_ssh_port': '2222',
'delegate_to': None,
'playbook_dir': '.',
'hostvars': {
'thishost': {
'inventory_hostname': 'thishost',
'ansible_ssh_port': '2222',
'ansible_ssh_host': '127.0.0.1',
'ansible_ssh_user': 'vagrant'
}
}
}
x = Synchronize(runner)
x.setup("synchronize", inject)
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.transport == "ssh", "runner transport was changed"
assert runner.remote_user == "jtanner", "runner remote_user was changed"
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
assert runner.executed_inject['ansible_ssh_user'] == "vagrant", "runner user was changed"
assert runner.executed_complex_args.get("dest_port") == "2222", "remote port was not set to 2222"
assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
assert runner.executed_complex_args.get("dest") == "[email protected]:/tmp/bar", "dest was set incorrectly"
| gpl-3.0 | 5,844,200,755,748,302,000 | 38.534091 | 120 | 0.555188 | false |
danakj/chromium | tools/grit/grit/format/chrome_messages_json_unittest.py | 23 | 3612 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for chrome_messages_json.py.
"""
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import grd_reader
from grit import util
from grit.tool import build
class ChromeMessagesJsonFormatUnittest(unittest.TestCase):
def testMessages(self):
root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS_SIMPLE_MESSAGE">
Simple message.
</message>
<message name="IDS_QUOTES">
element\u2019s \u201c<ph name="NAME">%s<ex>name</ex></ph>\u201d attribute
</message>
<message name="IDS_PLACEHOLDERS">
<ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning
</message>
<message name="IDS_PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
<message name="IDS_STARTS_WITH_SPACE">
''' (<ph name="COUNT">%d<ex>2</ex></ph>)
</message>
<message name="IDS_ENDS_WITH_SPACE">
(<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_SPACE_AT_BOTH_ENDS">
''' (<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_DOUBLE_QUOTES">
A "double quoted" message.
</message>
<message name="IDS_BACKSLASH">
\\
</message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
buf)
output = buf.getvalue()
test = u"""
{
"SIMPLE_MESSAGE": {
"message": "Simple message."
},
"QUOTES": {
"message": "element\\u2019s \\u201c%s\\u201d attribute"
},
"PLACEHOLDERS": {
"message": "%1$d error, %2$d warning"
},
"PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE": {
"message": "$1$test$2$",
"placeholders": {
"1": {
"content": "$1"
},
"2": {
"content": "$2"
}
}
},
"STARTS_WITH_SPACE": {
"message": " (%d)"
},
"ENDS_WITH_SPACE": {
"message": "(%d) "
},
"SPACE_AT_BOTH_ENDS": {
"message": " (%d) "
},
"DOUBLE_QUOTES": {
"message": "A \\"double quoted\\" message."
},
"BACKSLASH": {
"message": "\\\\"
}
}
"""
self.assertEqual(test.strip(), output.strip())
def testTranslations(self):
root = util.ParseGrdForUnittest("""
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'), buf)
output = buf.getvalue()
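# No real 'fr' translations are loaded here, so grit falls back to its
# built-in pseudo-translation (grit.pseudo), producing the accented strings
# expected below.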
test = u"""
{
"ID_HELLO": {
"message": "H\\u00e9P\\u00e9ll\\u00f4P\\u00f4!"
},
"ID_HELLO_USER": {
"message": "H\\u00e9P\\u00e9ll\\u00f4P\\u00f4 %s"
}
}
"""
self.assertEqual(test.strip(), output.strip())
class DummyOutput(object):
def __init__(self, type, language):
self.type = type
self.language = language
def GetType(self):
return self.type
def GetLanguage(self):
return self.language
def GetOutputFilename(self):
return 'hello.gif'
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 5,965,805,635,718,164,000 | 24.258741 | 116 | 0.565615 | false |
sadaf2605/django | django/db/models/sql/subqueries.py | 9 | 8284 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
# number of objects deleted
num_deleted = 0
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
return num_deleted
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
this is done by copying the query.query.where to self.query, for
complex queries by using a subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if not innerq_used_tables or innerq_used_tables == self.tables:
# There is only the base table in use in the query.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
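# (MySQL is the usual example: it cannot select from the table it is
# deleting from in the same statement, so we fall back to an explicit
# pk list.)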
values = list(query.values_list('pk', flat=True))
if not values:
return 0
return self.delete_batch(values, using)
else:
innerq.clear_select_clause()
innerq.select = [
pk.get_col(self.get_initial_alias())
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass, related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model is not self.get_meta().model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Append a sequence of (field, model, value) triples to the internal list
that will be used to generate the UPDATE query. Might be more usefully
called add_update_targets() to hint at the extra information here.
"""
for field, model, val in values_seq:
if hasattr(val, 'resolve_expression'):
# Resolve expressions here so that annotations are no longer needed
val = val.resolve_expression(self, allow_joins=False, for_save=True)
self.values.append((field, model, val))
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(
with_col_aliases=True,
subquery=True,
)
| bsd-3-clause | -2,984,305,642,480,932,000 | 36.484163 | 107 | 0.599469 | false |
cmeon/AndyImage | andy_image_resize.py | 1 | 1293 | #!/usr/bin/env python
import sys
from os import path, mkdir
from vipsCC import *
sizes = { 'ldpi':3, 'mdpi':4, 'hdpi':6, 'xhdpi':8, 'xxhdpi':12, 'xxxhdpi':16 }
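# The values are mdpi quarters: mdpi=4 is the 1x baseline, so ldpi=0.75x,
# hdpi=1.5x, xhdpi=2x, xxhdpi=3x and xxxhdpi=4x.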
if ( len(sys.argv) < 2):
print """
(H)Andy Image Resize
-----------------------------------
This program resizes images into ldpi to xxxhdpi
** It uses xxxhdpi as the base image size and not hdpi like in the Android docs.
usage: andy_image_resize.py <image> [<folder>]
<image> - filename of the image file with extension.
<folder> - may be the path to resource folder of an Android app project.
"""
exit(1)
try:
fullname = sys.argv[1]
basename = path.basename(sys.argv[1])
filename, extension = tuple(path.splitext(basename))
image = VImage.VImage(fullname)
basefolder = '.'
try:
basefolder = sys.argv[2]
except IndexError, e:
print 'Printing on current folder'
for k, v in sizes.items():
red = 16.0 / v # true division; float(16/v) truncates first on Python 2
folder = basefolder+'/'+'drawable-'+k
try:
mkdir(folder)
except OSError:
pass # the folder already exists
image.shrink(red, red).write(folder + '/' + filename + extension)
except VError.VError, e:
e.perror(sys.argv[0])
| mit | -8,723,088,299,091,839,000 | 27.108696 | 81 | 0.596288 | false |
HexHive/datashield | compiler/llvm/utils/lit/lit/ShCommands.py | 87 | 2696 | class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
# For debugging / validation.
import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
if not isinstance(other, Pipeline):
return False
return ((self.commands, self.negate, self.pipe_err) ==
(other.commands, other.negate, self.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
| gpl-3.0 | 4,670,977,233,172,282,000 | 30.717647 | 69 | 0.500371 | false |
BizzCloud/PosBox | addons/marketing_campaign_crm_demo/__openerp__.py | 119 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Marketing Campaign - Demo',
'version': '1.0',
'depends': ['marketing_campaign',
'crm',
],
'author': 'OpenERP SA',
'category': 'Marketing',
'description': """
Demo data for the module marketing_campaign.
============================================
Creates demo data like leads, campaigns and segments for the module marketing_campaign.
""",
'website': 'http://www.openerp.com',
'data': [],
'demo': ['marketing_campaign_demo.xml'],
'installable': True,
'auto_install': False,
'images': ['images/campaigns.jpeg','images/email_templates.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,796,450,197,818,860,000 | 37.068182 | 87 | 0.590448 | false |
vitaly-krugl/pika | pika/heartbeat.py | 1 | 8261 | """Handle AMQP Heartbeats"""
import logging
import pika.exceptions
from pika import frame
LOGGER = logging.getLogger(__name__)
class HeartbeatChecker(object):
"""Sends heartbeats to the broker. The provided timeout is used to
determine if the connection is stale - no received heartbeats or
other activity will close the connection. See the parameter list for more
details.
"""
_STALE_CONNECTION = "No activity or too many missed heartbeats in the last %i seconds"
def __init__(self, connection, timeout):
"""Create an object that will check for activity on the provided
connection as well as receive heartbeat frames from the broker. The
timeout parameter defines a window within which this activity must
happen. If not, the connection is considered dead and closed.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
:param pika.connection.Connection: Connection object
:param int timeout: Connection idle timeout. If no activity occurs on the
connection nor heartbeat frames received during the
timeout window the connection will be closed. The
interval used to send heartbeats is calculated from
this value by dividing it by two.
"""
if timeout < 1:
raise ValueError('timeout must be >= 1, but got %r' % (timeout,))
self._connection = connection
# Note: see the following documents:
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
# https://github.com/pika/pika/pull/1072
# https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion
# There is a certain amount of confusion around how client developers
# interpret the spec. The spec talks about 2 missed heartbeats as a
# *timeout*, plus that any activity on the connection counts for a
# heartbeat. This is to avoid edge cases and not to depend on network
# latency.
self._timeout = timeout
self._send_interval = float(timeout) / 2
# Note: Pika will calculate the heartbeat / connectivity check interval
# by adding 5 seconds to the negotiated timeout to leave a bit of room
# for broker heartbeats that may be right at the edge of the timeout
# window. This is different behavior from the RabbitMQ Java client and
# the spec that suggests a check interval equivalent to two times the
# heartbeat timeout value. But, one advantage of adding a small amount
# is that bad connections will be detected faster.
# https://github.com/pika/pika/pull/1072#issuecomment-397850795
# https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
# https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
self._check_interval = timeout + 5
LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
self._timeout,
self._send_interval,
self._check_interval)
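# With the RabbitMQ default of timeout=60 this means: send a heartbeat
# every 30s and check for connection activity every 65s.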
# Initialize counters
self._bytes_received = 0
self._bytes_sent = 0
self._heartbeat_frames_received = 0
self._heartbeat_frames_sent = 0
self._idle_byte_intervals = 0
self._send_timer = None
self._check_timer = None
self._start_send_timer()
self._start_check_timer()
@property
def bytes_received_on_connection(self):
"""Return the number of bytes received by the connection bytes object.
:rtype: int
"""
return self._connection.bytes_received
@property
def connection_is_idle(self):
"""Returns true if the byte count hasn't changed in enough intervals
to trip the max idle threshold.
"""
return self._idle_byte_intervals > 0
def received(self):
"""Called when a heartbeat is received"""
LOGGER.debug('Received heartbeat frame')
self._heartbeat_frames_received += 1
def _send_heartbeat(self):
"""Invoked by a timer to send a heartbeat when we need to.
"""
LOGGER.debug('Sending heartbeat frame')
self._send_heartbeat_frame()
self._start_send_timer()
def _check_heartbeat(self):
"""Invoked by a timer to check for broker heartbeats. Checks to see
if we've missed any heartbeats and disconnect our connection if it's
been idle too long.
"""
if self._has_received_data:
self._idle_byte_intervals = 0
else:
# Connection has not received any data, increment the counter
self._idle_byte_intervals += 1
LOGGER.debug('Received %i heartbeat frames, sent %i, '
'idle intervals %i',
self._heartbeat_frames_received,
self._heartbeat_frames_sent,
self._idle_byte_intervals)
if self.connection_is_idle:
self._close_connection()
return
self._start_check_timer()
def stop(self):
"""Stop the heartbeat checker"""
if self._send_timer:
LOGGER.debug('Removing timer for next heartbeat send interval')
self._connection._adapter_remove_timeout(self._send_timer) # pylint: disable=W0212
self._send_timer = None
if self._check_timer:
LOGGER.debug('Removing timer for next heartbeat check interval')
self._connection._adapter_remove_timeout(self._check_timer) # pylint: disable=W0212
self._check_timer = None
def _close_connection(self):
"""Close the connection with the AMQP Connection-Forced value."""
LOGGER.info('Connection is idle, %i stale byte intervals',
self._idle_byte_intervals)
text = HeartbeatChecker._STALE_CONNECTION % self._timeout
# Abort the stream connection. There is no point trying to gracefully
# close the AMQP connection since lack of heartbeat suggests that the
# stream is dead.
self._connection._terminate_stream( # pylint: disable=W0212
pika.exceptions.AMQPHeartbeatTimeout(text))
@property
def _has_received_data(self):
"""Returns True if the connection has received data.
:rtype: bool
"""
return self._bytes_received != self.bytes_received_on_connection
@staticmethod
def _new_heartbeat_frame():
"""Return a new heartbeat frame.
:rtype: pika.frame.Heartbeat
"""
return frame.Heartbeat()
def _send_heartbeat_frame(self):
"""Send a heartbeat frame on the connection.
"""
LOGGER.debug('Sending heartbeat frame')
self._connection._send_frame( # pylint: disable=W0212
self._new_heartbeat_frame())
self._heartbeat_frames_sent += 1
def _start_send_timer(self):
"""Start a new heartbeat send timer."""
self._send_timer = self._connection._adapter_add_timeout( # pylint: disable=W0212
self._send_interval,
self._send_heartbeat)
def _start_check_timer(self):
"""Start a new heartbeat check timer."""
# Note: update counters now to get current values
# at the start of the timeout window. Values will be
# checked against the connection's byte count at the
# end of the window
self._update_counters()
self._check_timer = self._connection._adapter_add_timeout( # pylint: disable=W0212
self._check_interval,
self._check_heartbeat)
def _update_counters(self):
"""Update the internal counters for bytes sent and received and the
number of frames received
"""
self._bytes_sent = self._connection.bytes_sent
self._bytes_received = self._connection.bytes_received
| bsd-3-clause | -8,952,741,432,178,022,000 | 37.966981 | 172 | 0.63249 | false |
rmccoy7541/egillettii-rnaseq | scripts/snp_performance_analysis.py | 1 | 3682 | #! /bin/env python
import sys
from optparse import OptionParser
import copy
import matplotlib
matplotlib.use('Agg')
import pylab
import scipy.optimize
import numpy
from numpy import array
import dadi
import os
#call ms program from within dadi, using optimized parameters (converted to ms units)
core = "-n 1 0.922 -n 2 0.104 -ej 0.0330 2 1 -en 0.0330 1 1"
command = dadi.Misc.ms_command(100000, (12,12), core, 1, 2000)
ms_fs = dadi.Spectrum.from_ms_file(os.popen(command))
#modify the following line to adjust the sample size of SNPs used for inference
scaled_ms_fs = ms_fs.fixed_size_sample(2000)
scaled_ms_fs = scaled_ms_fs.fold()
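# Folding collapses the spectrum to minor-allele frequencies, appropriate
# when the ancestral allele at each SNP is unknown.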
#import demographic models
import gillettii_models
def runModel(outFile, nuW_start, nuC_start, T_start):
# Extract the spectrum from ms output
fs = scaled_ms_fs
ns = fs.sample_sizes
print 'sample sizes:', ns
# These are the grid point settings will use for extrapolation.
pts_l = [20,30,40]
# It is suggested that the smallest grid be slightly larger than the largest sample size, but this may take a long time.
# bottleneck_split model
func = gillettii_models.bottleneck_split
params = array([nuW_start, nuC_start, T_start])
upper_bound = [30, 10, 10]
lower_bound = [1e-5, 1e-10, 0]
# Make the extrapolating version of the demographic model function.
func_ex = dadi.Numerics.make_extrap_func(func)
# Calculate the model AFS
model = func_ex(params, ns, pts_l)
# Calculate likelihood of the data given the model AFS
# Likelihood of the data given the model AFS.
ll_model = dadi.Inference.ll_multinom(model, fs)
print 'Model log-likelihood:', ll_model, "\n"
# The optimal value of theta given the model.
theta = dadi.Inference.optimal_sfs_scaling(model, fs)
p0 = dadi.Misc.perturb_params(params, fold=1, lower_bound=lower_bound, upper_bound=upper_bound)
print 'perturbed parameters: ', p0, "\n"
popt = dadi.Inference.optimize_log_fmin(p0, fs, func_ex, pts_l, upper_bound=upper_bound, lower_bound=lower_bound, maxiter=None, verbose=len(params))
print 'Optimized parameters:', repr(popt), "\n"
#use the optimized parameters in a new model to try to get the parameters to converge
new_model = func_ex(popt, ns, pts_l)
ll_opt = dadi.Inference.ll_multinom(new_model, fs)
print 'Optimized log-likelihood:', ll_opt, "\n"
# Write the parameters and log-likelihood to the outFile
s = str(nuW_start) + '\t' + str(nuC_start) + '\t' + str(T_start) + '\t'
for i in range(0, len(popt)):
s += str(popt[i]) + '\t'
s += str(ll_opt) + '\n'
outFile.write(s)
#################
def mkOptionParser():
""" Defines options and returns parser """
usage = """%prog <outFN> <nuW_start> <nuC_start> <T_start>
%prog performs demographic inference on gillettii RNA-seq data. """
parser = OptionParser(usage)
return parser
def main():
""" see usage in mkOptionParser. """
parser = mkOptionParser()
options, args= parser.parse_args()
if len(args) != 4:
parser.error("Incorrect number of arguments")
outFN = args[0]
nuW_start = float(args[1])
nuC_start = float(args[2])
T_start = float(args[3])
if outFN == '-':
outFile = sys.stdout
else:
outFile = open(outFN, 'a')
runModel(outFile, nuW_start, nuC_start, T_start)
#run main
if __name__ == '__main__':
main()
| mit | -6,656,796,103,224,141,000 | 31.298246 | 157 | 0.617599 | false |
andela-bojengwa/talk | venv/lib/python2.7/site-packages/rest_framework/viewsets.py | 21 | 5303 | """
ViewSets are essentially just a type of class based view, that doesn't provide
any method handlers, such as `get()`, `post()`, etc... but instead has actions,
such as `list()`, `retrieve()`, `create()`, etc...
Actions are only bound to methods at the point of instantiating the views.
user_list = UserViewSet.as_view({'get': 'list'})
user_detail = UserViewSet.as_view({'get': 'retrieve'})
Typically, rather than instantiate views from viewsets directly, you'll
register the viewset with a router and let the URL conf be determined
automatically.
router = DefaultRouter()
router.register(r'users', UserViewSet, 'user')
urlpatterns = router.urls
"""
from __future__ import unicode_literals
from functools import update_wrapper
from django.utils.decorators import classonlymethod
from django.views.decorators.csrf import csrf_exempt
from rest_framework import views, generics, mixins
class ViewSetMixin(object):
"""
This is the magic.
Overrides `.as_view()` so that it takes an `actions` keyword that performs
the binding of HTTP methods to actions on the Resource.
For example, to create a concrete view binding the 'GET' and 'POST' methods
to the 'list' and 'create' actions...
view = MyViewSet.as_view({'get': 'list', 'post': 'create'})
"""
@classonlymethod
def as_view(cls, actions=None, **initkwargs):
"""
Because of the way class based views create a closure around the
instantiated view, we need to totally reimplement `.as_view`,
and slightly modify the view function that is created and returned.
"""
# The suffix initkwarg is reserved for identifying the viewset type
# eg. 'List' or 'Instance'.
cls.suffix = None
# actions must not be empty
if not actions:
raise TypeError("The `actions` argument must be provided when "
"calling `.as_view()` on a ViewSet. For example "
"`.as_view({'get': 'list'})`")
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (
cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
# Patch this in as it's otherwise only present from 1.5 onwards
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
# And continue as usual
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
# We need to set these on the view function, so that breadcrumb
# generation can pick out these bits of information from a
# resolved URL.
view.cls = cls
view.suffix = initkwargs.get('suffix', None)
return csrf_exempt(view)
def initialize_request(self, request, *args, **kwargs):
"""
Set the `.action` attribute on the view,
depending on the request method.
"""
request = super(ViewSetMixin, self).initialize_request(request, *args, **kwargs)
self.action = self.action_map.get(request.method.lower())
return request
class ViewSet(ViewSetMixin, views.APIView):
"""
The base ViewSet class does not provide any actions by default.
"""
pass
class GenericViewSet(ViewSetMixin, generics.GenericAPIView):
"""
The GenericViewSet class does not provide any actions by default,
but does include the base set of generic view behavior, such as
the `get_object` and `get_queryset` methods.
"""
pass
class ReadOnlyModelViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A viewset that provides default `list()` and `retrieve()` actions.
"""
pass
class ModelViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A viewset that provides default `create()`, `retrieve()`, `update()`,
`partial_update()`, `destroy()` and `list()` actions.
"""
pass
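# Illustrative wiring of a concrete viewset to a router (a sketch, not part of
# the upstream module; `Account`, `AccountSerializer` and the `accounts` URL
# prefix are hypothetical names used only for this example):
#     class AccountViewSet(ModelViewSet):
#         queryset = Account.objects.all()
#         serializer_class = AccountSerializer
#     router = DefaultRouter()
#     router.register(r'accounts', AccountViewSet, 'account')
#     urlpatterns = router.urls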
| mit | -6,967,431,628,578,942,000 | 35.321918 | 88 | 0.610598 | false |
mit0110/oppia | core/tests/test_util_jobs.py | 19 | 4472 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs operating on explorations that can be used for production tests.
To use these jobs, you first need to register them in jobs_registry (at
the moment they are not displayed there to avoid accidental use)."""
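# A sketch of what that registration might look like (the registry layout is
# an assumption, not shown in this file):
#     # in core/jobs_registry.py
#     ALL_CONTINUOUS_COMPUTATION_MANAGERS = [
#         test_util_jobs.ExpCopiesAggregator,
#         test_util_jobs.DeleteExpCopiesAggregator,
#     ]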
from core import jobs
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.platform import models
import feconf
(base_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration])
class ExpCopiesRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
pass
class ExpCopiesAggregator(jobs.BaseContinuousComputationManager):
"""A continuous-computation job creating 10 published copies of every
existing exploration, with the eid being '[old_eid]copy[copy_number]',
title 'Copy' and category 'Copies'.
"""
@classmethod
def get_event_types_listened_to(cls):
return []
@classmethod
def _get_realtime_datastore_class(cls):
return ExpCopiesRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return ExpCopiesMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
pass
class ExpCopiesMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""A continuous-computation job creating 10 published copies of every
existing exploration, with the eid being '[old_eid]copy[copy_number]',
title 'Copy' and category 'Copies'.
"""
@classmethod
def _get_continuous_computation_class(cls):
return ExpCopiesAggregator
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if ExpCopiesMRJobManager._entity_created_before_job_queued(item):
for count in range(10):
yield ('%scopy%d' % (item.id, count),
exp_services.get_exploration_from_model(item).to_yaml())
@staticmethod
def reduce(exp_id, list_of_exps):
for stringified_exp in list_of_exps:
exploration = exp_domain.Exploration.from_untitled_yaml(
exp_id, 'Copy', 'Copies', stringified_exp)
exp_services.save_new_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration)
rights_manager.publish_exploration(
feconf.SYSTEM_COMMITTER_ID, exp_id)
# Job to delete all copied explorations.
class DeleteExpCopiesRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
pass
class DeleteExpCopiesAggregator(jobs.BaseContinuousComputationManager):
"""A continuous-computation job deleting all explorations in category
'Copies'.
"""
@classmethod
def get_event_types_listened_to(cls):
return []
@classmethod
def _get_realtime_datastore_class(cls):
return DeleteExpCopiesRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return DeleteExpCopiesMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
pass
class DeleteExpCopiesMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""Job that deletes all explorations in category 'Copies'.
"""
@classmethod
def _get_continuous_computation_class(cls):
return DeleteExpCopiesAggregator
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.category == 'Copies':
exp_services.delete_exploration(
feconf.SYSTEM_COMMITTER_ID, item.id, force_deletion=True)
@staticmethod
def reduce(exp_id, list_of_exps):
pass
| apache-2.0 | -4,002,655,365,093,000,000 | 31.172662 | 79 | 0.702818 | false |
dakerfp/AutobahnPython | examples/twisted/wamp/basic/rpc/timeservice/backend.py | 8 | 1139 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import datetime
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
A simple time service application component.
"""
def onJoin(self, details):
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
self.register(utcnow, 'com.timeservice.now')
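      # For context (an assumption, not part of this file): a WAMP caller on
      # the other end would consume this procedure roughly as
      #   res = yield self.call('com.timeservice.now')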
| apache-2.0 | -424,877,754,839,242,300 | 30.638889 | 79 | 0.602283 | false |
jelugbo/hebs_master | lms/djangoapps/licenses/tests.py | 30 | 9472 | """Tests for License package"""
import logging
import json
from uuid import uuid4
from random import shuffle
from tempfile import NamedTemporaryFile
import factory
from factory.django import DjangoModelFactory
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from nose.tools import assert_true # pylint: disable=E0611
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from licenses.models import CourseSoftware, UserLicense
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
COURSE_1 = 'edX/toy/2012_Fall'
SOFTWARE_1 = 'matlab'
SOFTWARE_2 = 'stata'
SERIAL_1 = '123456abcde'
log = logging.getLogger(__name__)
class CourseSoftwareFactory(DjangoModelFactory):
'''Factory for generating CourseSoftware objects in database'''
FACTORY_FOR = CourseSoftware
name = SOFTWARE_1
full_name = SOFTWARE_1
url = SOFTWARE_1
course_id = COURSE_1
class UserLicenseFactory(DjangoModelFactory):
'''
Factory for generating UserLicense objects in database
By default, the user assigned is null, indicating that the
serial number has not yet been assigned.
'''
FACTORY_FOR = UserLicense
user = None
software = factory.SubFactory(CourseSoftwareFactory)
serial = SERIAL_1
class LicenseTestCase(TestCase):
'''Tests for licenses.views'''
def setUp(self):
'''creates a user and logs in'''
# self.setup_viewtest_user()
self.user = UserFactory(username='test',
email='[email protected]', password='test_password')
self.client = Client()
assert_true(self.client.login(username='test', password='test_password'))
self.software = CourseSoftwareFactory()
def test_get_license(self):
UserLicenseFactory(user=self.user, software=self.software)
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('error' in json_returned)
self.assertTrue('serial' in json_returned)
self.assertEquals(json_returned['serial'], SERIAL_1)
def test_get_nonexistent_license(self):
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('serial' in json_returned)
self.assertTrue('error' in json_returned)
def test_create_nonexistent_license(self):
'''Should not assign a license to an unlicensed user when none are available'''
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'true'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('serial' in json_returned)
self.assertTrue('error' in json_returned)
def test_create_license(self):
'''Should assign a license to an unlicensed user if one is unassigned'''
# create an unassigned license
UserLicenseFactory(software=self.software)
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'true'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(200, response.status_code)
json_returned = json.loads(response.content)
self.assertFalse('error' in json_returned)
self.assertTrue('serial' in json_returned)
self.assertEquals(json_returned['serial'], SERIAL_1)
def test_get_license_from_wrong_course(self):
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format('some/other/course'))
self.assertEqual(404, response.status_code)
def test_get_license_from_non_ajax(self):
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(404, response.status_code)
def test_get_license_without_software(self):
response = self.client.post(reverse('user_software_license'),
{'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
self.assertEqual(404, response.status_code)
def test_get_license_without_login(self):
self.client.logout()
response = self.client.post(reverse('user_software_license'),
{'software': SOFTWARE_1, 'generate': 'false'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_REFERER='/courses/{0}/some_page'.format(COURSE_1))
# if we're not logged in, we should be referred to the login page
self.assertEqual(302, response.status_code)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CommandTest(ModuleStoreTestCase):
'''Test management command for importing serial numbers'''
def setUp(self):
course = CourseFactory.create()
self.course_id = course.id
def test_import_serial_numbers(self):
size = 20
log.debug('Adding one set of serials for {0}'.format(SOFTWARE_1))
with generate_serials_file(size) as temp_file:
args = [self.course_id.to_deprecated_string(), SOFTWARE_1, temp_file.name]
call_command('import_serial_numbers', *args)
log.debug('Adding one set of serials for {0}'.format(SOFTWARE_2))
with generate_serials_file(size) as temp_file:
args = [self.course_id.to_deprecated_string(), SOFTWARE_2, temp_file.name]
call_command('import_serial_numbers', *args)
log.debug('There should be only 2 course-software entries')
software_count = CourseSoftware.objects.all().count()
self.assertEqual(2, software_count)
log.debug('We added two sets of {0} serials'.format(size))
licenses_count = UserLicense.objects.all().count()
self.assertEqual(2 * size, licenses_count)
log.debug('Adding more serial numbers to {0}'.format(SOFTWARE_1))
with generate_serials_file(size) as temp_file:
args = [self.course_id.to_deprecated_string(), SOFTWARE_1, temp_file.name]
call_command('import_serial_numbers', *args)
log.debug('There should be still only 2 course-software entries')
software_count = CourseSoftware.objects.all().count()
self.assertEqual(2, software_count)
log.debug('Now we should have 3 sets of 20 serials'.format(size))
licenses_count = UserLicense.objects.all().count()
self.assertEqual(3 * size, licenses_count)
software = CourseSoftware.objects.get(pk=1)
lics = UserLicense.objects.filter(software=software)[:size]
known_serials = list(l.serial for l in lics)
known_serials.extend(generate_serials(10))
shuffle(known_serials)
log.debug('Adding some new and old serials to {0}'.format(SOFTWARE_1))
with NamedTemporaryFile() as tmpfile:
tmpfile.write('\n'.join(known_serials))
tmpfile.flush()
args = [self.course_id.to_deprecated_string(), SOFTWARE_1, tmpfile.name]
call_command('import_serial_numbers', *args)
log.debug('Check if we added only the new ones')
licenses_count = UserLicense.objects.filter(software=software).count()
self.assertEqual((2 * size) + 10, licenses_count)
def generate_serials(size=20):
'''generate a list of serial numbers'''
return [str(uuid4()) for _ in range(size)]
def generate_serials_file(size=20):
'''output list of generated serial numbers to a temp file'''
serials = generate_serials(size)
temp_file = NamedTemporaryFile()
temp_file.write('\n'.join(serials))
temp_file.flush()
return temp_file
| agpl-3.0 | 945,123,290,270,383,700 | 41.859729 | 102 | 0.628906 | false |
ttroy50/vsid | tools/reset_protocol_model.py | 1 | 2350 | #!/usr/bin/python
#
"""
Reset an Protocol in the database to 0
"""
import sys
import yaml
from optparse import OptionParser
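# Example invocation (hypothetical file and protocol names, for illustration
# only):
#   python reset_protocol_model.py -f db.yaml -d db_reset.yaml -n HTTP -a host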
def reset_protocol(file, dest, name, attributes=None):
try:
with open(file, 'r') as stream:
         database = yaml.safe_load(stream)  # safe_load avoids executing arbitrary YAML tags
except Exception, ex:
print "Exception loading db file : %s" %ex
sys.exit(1)
if database is None:
print "Unable to load yaml %s" %ex
sys.exit(1)
for proto in database["ProtocolModels"]:
if proto["ProtocolName"] == name:
print "resetting %s" % proto["ProtocolName"]
for meter in proto["AttributeMeters"]:
if attributes is not None:
if meter["AttributeName"] not in attributes:
continue
print "resetting %s" % meter["AttributeName"]
num = len(meter["FingerPrint"])
for val in range(0, num):
meter["FingerPrint"][val] = 0
if dest is not None:
with open(dest, 'w') as outfile:
outfile.write( yaml.dump(database, default_flow_style=True, explicit_start=True) )
else:
print yaml.dump(database, default_flow_style=True, explicit_start=True)
def main():
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="Database file to load", metavar="FILE")
parser.add_option("-d", "--dest", dest="destfile",
help="Database file to write to. If not supplied will write to stdout", metavar="FILE")
parser.add_option("-n", "--name", dest="name",
help="Protocol Name", metavar="name")
parser.add_option("-a", "--attribute", action="append", dest="attributes",
help="Attributes to reset. Not adding this means all")
(options, args) = parser.parse_args()
if options.filename is None or options.filename == "":
print "ERROR: No Database file supplied\n"
parser.print_help()
sys.exit(1)
if options.name is None or options.name == "":
print "ERROR: No Name\n"
parser.print_help()
sys.exit(1)
reset_protocol(options.filename, options.destfile, options.name, options.attributes)
if __name__ == "__main__":
# execute only if run as a script
main() | mit | -7,178,129,445,476,918,000 | 31.205479 | 109 | 0.578723 | false |
SEL-Columbia/commcare-hq | corehq/apps/reports/tests/test_cache.py | 1 | 5210 | import uuid
from django.http import HttpRequest
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.reports.cache import CacheableRequestMixIn, request_cache
from corehq.apps.users.models import WebUser
class MockReport(CacheableRequestMixIn):
def __init__(self, request, is_cacheable=True):
self.request = request
self.is_cacheable = is_cacheable
@request_cache('v1')
def v1(self):
return uuid.uuid4().hex
@request_cache('v2')
def v2(self):
return uuid.uuid4().hex
BLANK = '__blank__'
def _make_request(path=BLANK, domain=BLANK, user=BLANK):
request = HttpRequest()
if domain != BLANK:
request.domain = domain
if path != BLANK:
request.path = path
if user != BLANK:
request.couch_user = user
return request
class ReportCacheTest(TestCase):
# note: this is pretty tightly coupled with the internals of the cache
# but this is probably ok since that's what it's designed to test
domain = 'cache-test'
def setUp(self):
create_domain(self.domain)
self.web_user1 = WebUser.create(self.domain, 'w1', 'secret')
self.web_user2 = WebUser.create(self.domain, 'w2', 'secret')
def tearDown(self):
self.web_user1.delete()
self.web_user2.delete()
def testBasicFunctionality(self):
report = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
self.domain, self.web_user1))
v1 = report.v1()
#self.assertEqual(v1, report.v1())
v2 = report.v2()
self.assertEqual(v2, report.v2())
self.assertNotEqual(v1, v2)
copy = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
self.domain, self.web_user1))
self.assertEqual(v1, copy.v1())
self.assertEqual(v2, copy.v2())
def testNonCacheable(self):
report = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
self.domain, self.web_user1),
is_cacheable=False)
v1 = report.v1()
self.assertNotEqual(v1, report.v1())
self.assertNotEqual(report.v1(), report.v1())
def testPathSpecific(self):
report = MockReport(_make_request('/a/{domain}/reports/foobar'.format(domain=self.domain),
self.domain, self.web_user1))
v1 = report.v1()
        v2 = report.v2()
alternate_paths = [
'/reports/barbar',
'/reports/foobars',
'/reports/foobar/baz',
'/reports/foobar?bip=bop',
]
for path in alternate_paths:
full_path = '/a/{domain}{path}'.format(domain=self.domain, path=path)
alternate = MockReport(_make_request(full_path, self.domain, self.web_user1))
alt_v1 = alternate.v1()
self.assertEqual(alt_v1, alternate.v1())
alt_v2 = alternate.v2()
self.assertEqual(alt_v2, alternate.v2())
self.assertNotEqual(alt_v1, v1)
self.assertNotEqual(alt_v2, v2)
def testDomainSpecific(self):
path = '/a/{domain}/reports/foobar'.format(domain=self.domain)
report = MockReport(_make_request(path, self.domain, self.web_user1))
v1 = report.v1()
        v2 = report.v2()
alternate_domains = [
'cache',
'cachetest',
'cache-testy',
None,
BLANK,
]
for dom in alternate_domains:
alternate = MockReport(_make_request(path, dom, self.web_user1))
alt_v1 = alternate.v1()
# since this is invalid, this shouldn't even be caching itself
self.assertNotEqual(alt_v1, alternate.v1())
alt_v2 = alternate.v2()
self.assertNotEqual(alt_v2, alternate.v2())
self.assertNotEqual(alt_v1, v1)
self.assertNotEqual(alt_v2, v2)
def testUserSpecific(self):
path = '/a/{domain}/reports/foobar'.format(domain=self.domain)
report = MockReport(_make_request(path, self.domain, self.web_user1))
v1 = report.v1()
        v2 = report.v2()
alternate = MockReport(_make_request(path, self.domain, self.web_user2))
alt_v1 = alternate.v1()
self.assertEqual(alt_v1, alternate.v1())
alt_v2 = alternate.v2()
self.assertEqual(alt_v2, alternate.v2())
self.assertNotEqual(alt_v1, v1)
self.assertNotEqual(alt_v2, v2)
# invalid users shouldn't even be caching themselves
for invalid in ['not a user object', None, BLANK]:
alternate = MockReport(_make_request(path, self.domain, invalid))
alt_v1 = alternate.v1()
# since this is invalid, this shouldn't even be caching itself
self.assertNotEqual(alt_v1, alternate.v1())
alt_v2 = alternate.v2()
self.assertNotEqual(alt_v2, alternate.v2())
self.assertNotEqual(alt_v1, v1)
self.assertNotEqual(alt_v2, v2) | bsd-3-clause | 1,854,157,568,961,513,200 | 36.76087 | 98 | 0.590595 | false |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/linux2/twisted/trial/reporter.py | 3 | 15233 | # -*- test-case-name: twisted.trial.test.test_trial -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Author: Jonathan D. Simms <[email protected]>
# Original Author: Jonathan Lange <[email protected]>
from __future__ import generators
import sys, types
import warnings
from twisted.python import reflect, failure, log
from twisted.python.compat import adict
from twisted.internet import defer
from twisted.trial import itrial, util
import zope.interface as zi
#******************************************************************************
# turn this off if you're having trouble with traceback printouts or some such
HIDE_TRIAL_INTERNALS = True
#******************************************************************************
# test results, passed as resultType to Reporter.endTest()
STATUSES = (SKIP, EXPECTED_FAILURE, FAILURE,
ERROR, UNEXPECTED_SUCCESS, SUCCESS) = ("skips", "expectedFailures",
"failures", "errors",
"unexpectedSuccesses",
"successes")
WORDS = {SKIP: '[SKIPPED]',
EXPECTED_FAILURE: '[TODO]',
FAILURE: '[FAIL]', ERROR: '[ERROR]',
UNEXPECTED_SUCCESS: '[SUCCESS!?!]',
SUCCESS: '[OK]'}
LETTERS = {SKIP: 'S', EXPECTED_FAILURE: 'T',
FAILURE: 'F', ERROR: 'E',
UNEXPECTED_SUCCESS: '!', SUCCESS: '.'}
SEPARATOR = '-' * 79
DOUBLE_SEPARATOR = '=' * 79
_basefmt = "caught exception in %s, your TestCase is broken\n\n"
SET_UP_CLASS_WARN = _basefmt % 'setUpClass'
SET_UP_WARN = _basefmt % 'setUp'
TEAR_DOWN_WARN = _basefmt % 'tearDown'
TEAR_DOWN_CLASS_WARN = _basefmt % 'tearDownClass'
DIRTY_REACTOR_POLICY_WARN = "This failure will cause all methods in your class to be reported as ERRORs in the summary"
UNCLEAN_REACTOR_WARN = "REACTOR UNCLEAN! traceback(s) follow: "
PASSED, FAILED = "PASSED", "FAILED"
methNameWarnMsg = adict(setUpClass = SET_UP_CLASS_WARN,
setUp = SET_UP_WARN,
tearDown = TEAR_DOWN_WARN,
tearDownClass = TEAR_DOWN_CLASS_WARN)
# ----------------------------------------------------------------------------
def makeLoggingMethod(name, f):
def loggingMethod(*a, **kw):
print "%s.%s(*%r, **%r)" % (name, f.func_name, a, kw)
return f(*a, **kw)
return loggingMethod
class MethodCallLoggingType(type):
def __new__(cls, name, bases, attrs):
for (k, v) in attrs.items():
if isinstance(v, types.FunctionType):
attrs[k] = makeLoggingMethod(name, v)
return super(MethodCallLoggingType, cls).__new__(cls, name, bases,
attrs)
class TestStatsBase(object):
zi.implements(itrial.ITestStats)
importErrors = None
def __init__(self, original):
#print "original: %r" % (original,)
self.original = original
def _collect(self):
raise NotImplementedError, "should be overridden in subclasses"
def get_skips(self):
return self._collect(SKIP)
def get_errors(self):
return self._collect(ERROR)
def get_failures(self):
return self._collect(FAILURE)
def get_expectedFailures(self):
return self._collect(EXPECTED_FAILURE)
def get_unexpectedSuccesses(self):
return self._collect(UNEXPECTED_SUCCESS)
def get_successes(self):
return self._collect(SUCCESS)
def runningTime(self):
o = self.original
return o.endTime - o.startTime
runningTime = property(runningTime)
class TestStats(TestStatsBase):
# this adapter is used for both TestSuite and TestModule objects
importErrors = property(lambda self: getattr(self.original,
'couldNotImport', {}).items())
def _collect(self, status):
meths = []
for r in self.original.children:
meths.extend(r.methodsWithStatus.get(status, []))
return meths
def numTests(self):
n = 0
for r in self.original.children:
ts = itrial.ITestStats(r)
n += ts.numTests()
return n
def allPassed(self):
for r in self.original.children:
if not itrial.ITestStats(r).allPassed:
return False
if getattr(self.original, 'couldNotImport', False):
return False
return True
allPassed = property(allPassed)
class TestCaseStats(TestStatsBase):
def _collect(self, status):
"""return a list of all TestMethods with status"""
return self.original.methodsWithStatus.get(status, [])
def numTests(self):
n = len(self.original.children)
return n
def allPassed(self):
for status in (ERROR, FAILURE):
if status in self.original.methodsWithStatus:
return False
return True
allPassed = property(allPassed)
class DocTestRunnerStats(TestCaseStats):
def numTests(self):
"""DocTestRunners are singleton runners"""
return 1
class BrokenTestCaseWarning(Warning):
"""emitted as a warning when an exception occurs in one of
setUp, tearDown, setUpClass, or tearDownClass"""
class Reporter(object):
zi.implements(itrial.IReporter)
debugger = None
def __init__(self, stream=sys.stdout, tbformat='default', args=None,
realtime=False):
self.stream = stream
self.tbformat = tbformat
self.args = args
self.realtime = realtime
        super(Reporter, self).__init__()  # object.__init__ takes no extra arguments
def setUpReporter(self):
return defer.succeed(None)
def tearDownReporter(self):
return defer.succeed(None)
def startTest(self, method):
pass
def reportImportError(self, name, exc):
pass
def write(self, format, *args):
s = str(format)
assert isinstance(s, type(''))
if args:
self.stream.write(s % args)
else:
self.stream.write(s)
self.stream.flush()
def startModule(self, name):
pass
def startClass(self, klass):
pass
def endModule(self, module):
pass
def endClass(self, klass):
pass
def emitWarning(self, message, category=UserWarning, stacklevel=0):
warnings.warn(message, category, stacklevel - 1)
def upDownError(self, userMeth, warn=True, printStatus=True):
if warn:
minfo = itrial.IMethodInfo(userMeth)
            tbStr = '\n'.join([e.getTraceback() for e in userMeth.errors])
log.msg(tbStr)
msg = "%s%s" % (methNameWarnMsg[minfo.name], tbStr)
warnings.warn(msg, BrokenTestCaseWarning, stacklevel=2)
def cleanupErrors(self, errs):
warnings.warn("%s\n%s" % (UNCLEAN_REACTOR_WARN,
'\n'.join(map(self._formatFailureTraceback, errs))),
BrokenTestCaseWarning)
def endTest(self, method):
method = itrial.ITestMethod(method)
if self.realtime:
for err in method.errors + method.failures:
err.printTraceback(self.stream)
def _formatFailureTraceback(self, fail):
# Short term hack
if isinstance(fail, str):
return fail
detailLevel = self.tbformat
result = fail.getTraceback(detail=detailLevel, elideFrameworkCode=True)
if detailLevel == 'default':
            # Apparently trial's tests don't like the 'Traceback:' line.
result = '\n'.join(result.split('\n')[1:])
return result
def _formatImportError(self, name, error):
"""format an import error for report in the summary section of output
@param name: The name of the module which could not be imported
@param error: The exception which occurred on import
@rtype: str
"""
ret = [DOUBLE_SEPARATOR, '\nIMPORT ERROR:\n\n']
if isinstance(error, failure.Failure):
what = self._formatFailureTraceback(error)
elif type(error) == types.TupleType:
what = error.args[0]
else:
what = "%s\n" % error
ret.append("Could not import %s: \n%s\n" % (name, what))
return ''.join(ret)
def _formatFailedTest(self, name, status, failures, skipMsg=None, todoMsg=None):
ret = [DOUBLE_SEPARATOR, '%s: %s\n' % (WORDS[status], name)]
if skipMsg:
ret.append(self._formatFailureTraceback(skipMsg) + '\n')
if todoMsg:
ret.append(todoMsg + '\n')
if status not in (SUCCESS, SKIP, UNEXPECTED_SUCCESS):
ret.extend(map(self._formatFailureTraceback, failures))
return '\n'.join(ret)
def _reportStatus(self, tsuite):
tstats = itrial.ITestStats(tsuite)
summaries = []
for stat in STATUSES:
num = len(getattr(tstats, "get_%s" % stat)())
if num:
summaries.append('%s=%d' % (stat, num))
summary = (summaries and ' ('+', '.join(summaries)+')') or ''
if tstats.get_failures() or tstats.get_errors():
status = FAILED
else:
status = PASSED
self.write("%s%s\n", status, summary)
def _reportFailures(self, tstats):
for meth in getattr(tstats, "get_%s" % SKIP)():
self.write(self._formatFailedTest(
meth.fullName, meth.status,
meth.errors + meth.failures,
meth.skip,
itrial.ITodo(meth.todo).msg))
for status in [EXPECTED_FAILURE, FAILURE, ERROR]:
for meth in getattr(tstats, "get_%s" % status)():
if meth.hasTbs:
self.write(self._formatFailedTest(
meth.fullName, meth.status,
meth.errors + meth.failures,
meth.skip,
itrial.ITodo(meth.todo).msg))
for name, error in tstats.importErrors:
self.write(self._formatImportError(name, error))
def endSuite(self, suite):
tstats = itrial.ITestStats(suite)
self.write("\n")
self._reportFailures(tstats)
self.write("%s\n" % SEPARATOR)
self.write('Ran %d tests in %.3fs\n', tstats.numTests(),
tstats.runningTime)
self.write('\n')
self._reportStatus(suite)
class MinimalReporter(Reporter):
def endSuite(self, suite):
tstats = itrial.ITestStats(suite)
t = (tstats.runningTime, tstats.numTests(), tstats.numTests(),
# XXX: expectedTests == runTests
len(tstats.importErrors), len(tstats.get_errors()),
len(tstats.get_failures()), len(tstats.get_skips()))
self.stream.write(' '.join(map(str,t))+'\n')
class TextReporter(Reporter):
def __init__(self, stream=sys.stdout, tbformat='default', args=None,
realtime=False):
super(TextReporter, self).__init__(stream, tbformat, args, realtime)
self.seenModules, self.seenClasses = {}, {}
def endTest(self, method):
self.write(LETTERS.get(itrial.ITestMethod(method).status, '?'))
super(TextReporter, self).endTest(method)
class VerboseTextReporter(TextReporter):
# This is actually the bwverbose option
def startTest(self, method):
tm = itrial.ITestMethod(method)
# XXX this is a crap workaround for doctests,
# there should be a better solution.
try:
klass = reflect.qual(tm.klass)
except AttributeError: # not a real class
klass = str(tm.klass)
self.write('%s (%s) ... ', tm.name, klass)
super(VerboseTextReporter, self).startTest(method)
def endTest(self, method):
self.write("%s\n" % WORDS.get(itrial.ITestMethod(method).status,
"[??]"))
class TimingTextReporter(VerboseTextReporter):
def endTest(self, method):
self.write("%s" % WORDS.get(method.status, "[??]") + " "
+ "(%.03f secs)\n" % method.runningTime())
class TreeReporter(VerboseTextReporter):
#__metaclass__ = MethodCallLoggingType
currentLine = ''
columns = 79
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
def __init__(self, stream=sys.stdout, tbformat='default', args=None,
realtime=False):
super(TreeReporter, self).__init__(stream, tbformat, args, realtime)
self.words = {SKIP: ('[SKIPPED]', self.BLUE),
EXPECTED_FAILURE: ('[TODO]', self.BLUE),
FAILURE: ('[FAIL]', self.RED),
ERROR: ('[ERROR]', self.RED),
UNEXPECTED_SUCCESS: ('[SUCCESS!?!]', self.RED),
SUCCESS: ('[OK]', self.GREEN)}
def _getText(self, status):
return self.words.get(status, ('[??]', self.BLUE))
def write(self, format, *args):
if args:
format = format % args
self.currentLine = format
super(TreeReporter, self).write(self.currentLine)
def startModule(self, module):
modName = module.__name__
if modName not in self.seenModules:
self.seenModules[modName] = 1
self.write(' %s\n' % modName)
def startClass(self, klass):
clsName = klass.__name__
qualifiedClsName = reflect.qual(klass)
if qualifiedClsName not in self.seenClasses:
self.seenClasses[qualifiedClsName] = 1
self.write(' %s\n' % clsName)
def cleanupErrors(self, errs):
self.write(self.color(' cleanup errors', self.RED))
self.endLine(*self._getText(ERROR))
super(TreeReporter, self).cleanupErrors(errs)
def upDownError(self, method, warn=True, printStatus=True):
m = itrial.IMethodInfo(method)
self.write(self.color(" %s" % m.name, self.RED))
if printStatus:
self.endLine(*self._getText(ERROR))
super(TreeReporter, self).upDownError(method, warn, printStatus)
def startTest(self, method):
tm = itrial.ITestMethod(method)
if tm.docstr:
# inspect trims whitespace on the left; the lstrip here is
# for those odd folks who start docstrings with a blank line.
what = tm.docstr.lstrip().split('\n', 1)[0]
else:
what = tm.name
self.write(' %s ... ', what)
def endTest(self, method):
Reporter.endTest(self, method)
tm = itrial.ITestMethod(method)
self.endLine(*self._getText(tm.status))
def color(self, text, color):
return '%s%s;1m%s%s0m' % ('\x1b[', color, text, '\x1b[')
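        # e.g. self.color('[FAIL]', self.RED) -> '\x1b[31;1m[FAIL]\x1b[0m':
        # the text wrapped in a bold ANSI color escape followed by a reset.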
def endLine(self, message, color):
spaces = ' ' * (self.columns - len(self.currentLine) - len(message))
super(TreeReporter, self).write(spaces)
super(TreeReporter, self).write("%s\n" % (self.color(message, color),))
| gpl-2.0 | 2,850,688,944,673,330,000 | 32.405702 | 119 | 0.575789 | false |
eebssk1/CAF_MSM_Kernel_msm8916_64 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
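# Example (illustrative): "perf script -s sctop.py firefox 5" refreshes the
# per-syscall totals every 5 seconds, counting only syscalls made by the
# "firefox" command.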
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 | 2,926,102,911,573,132,300 | 24.653333 | 75 | 0.651247 | false |
frreiss/tensorflow-fred | tensorflow/compiler/tests/matrix_triangular_solve_op_test.py | 14 | 7183 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.MatrixTriangularSolve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
def MakePlaceholder(x):
return array_ops.placeholder(dtypes.as_dtype(x.dtype), shape=x.shape)
class MatrixTriangularSolveOpTest(xla_test.XLATestCase):
# MatrixTriangularSolve defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/matrix_triangular_solve)
@property
def float_types(self):
return set(super(MatrixTriangularSolveOpTest,
self).float_types).intersection(
(np.float64, np.float32, np.complex64, np.complex128))
def _VerifyTriangularSolveBase(self, sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b, verification,
atol):
feed_dict = {placeholder_a: a, placeholder_ca: clean_a, placeholder_b: b}
verification_np = sess.run(verification, feed_dict)
broadcasted_shape = a.shape[:-2] + (b.shape[-2], b.shape[-1])
broadcasted_b = b + np.zeros(shape=broadcasted_shape, dtype=b.dtype)
self.assertAllClose(broadcasted_b, verification_np, atol=atol)
def _VerifyTriangularSolve(self, a, b, lower, adjoint, atol):
clean_a = np.tril(a) if lower else np.triu(a)
with self.session() as sess:
placeholder_a = MakePlaceholder(a)
placeholder_ca = MakePlaceholder(clean_a)
placeholder_b = MakePlaceholder(b)
with self.test_scope():
x = linalg_ops.matrix_triangular_solve(
placeholder_a, placeholder_b, lower=lower, adjoint=adjoint)
verification = test_util.matmul_without_tf32(
placeholder_ca, x, adjoint_a=adjoint)
self._VerifyTriangularSolveBase(sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b,
verification, atol)
def _VerifyTriangularSolveCombo(self, a, b, atol=1e-4):
transp = lambda x: np.swapaxes(x, -1, -2)
for lower, adjoint in itertools.product([True, False], repeat=2):
self._VerifyTriangularSolve(
a if lower else transp(a), b, lower, adjoint, atol)
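  # For reference (a sketch, not part of the original test): with lower=True
  # and adjoint=False, matrix_triangular_solve returns x such that
  # np.tril(a) @ x == b, i.e. a forward substitution. A SciPy analogue would
  # be x = scipy.linalg.solve_triangular(np.tril(a), b, lower=True).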
def testBasic(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5))
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicNotActuallyTriangular(self):
rng = np.random.RandomState(0)
a = rng.randn(5, 5) # the `a` matrix is not lower-triangular
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicComplexDtypes(self):
if xla_test.test.is_built_with_rocm():
      # The following subtest invokes the call to "BlasTrsm"
# That operation is currently not supported on the ROCm platform
self.skipTest("BlasTrsm op for complex types is not supported in ROCm")
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5) + rng.randn(5, 5) * 1j)
b = rng.randn(5, 7) + rng.randn(5, 7) * 1j
for dtype in self.complex_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBatch(self):
rng = np.random.RandomState(0)
shapes = [((4, 3, 3), (4, 3, 5)), ((1, 2, 2), (1, 2, 1)),
((1, 1, 1), (1, 1, 2)), ((2, 3, 4, 4), (2, 3, 4, 1))]
tuples = itertools.product(self.float_types, shapes)
for dtype, (a_shape, b_shape) in tuples:
n = a_shape[-1]
a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(*b_shape)
self._VerifyTriangularSolveCombo(
a.astype(dtype), b.astype(dtype), atol=1e-3)
def testBatchBroadcast(self):
rng = np.random.RandomState(0)
shapes = [((3, 3), (4, 3, 5)), ((1, 2, 2), (3, 2, 1)), ((1, 1), (1, 1, 2)),
((1, 3, 4, 4), (2, 1, 4, 1))]
tuples = itertools.product(self.float_types, shapes)
for dtype, (a_shape, b_shape) in tuples:
n = a_shape[-1]
a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(*b_shape)
self._VerifyTriangularSolveCombo(
a.astype(dtype), b.astype(dtype), atol=1e-3)
def testLarge(self):
n = 1024
rng = np.random.RandomState(0)
a = np.tril(rng.rand(n, n) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(n, n)
self._VerifyTriangularSolve(
a.astype(np.float32), b.astype(np.float32), True, False, 1e-4)
@test_util.disable_mlir_bridge("Error handling")
def testNonSquareCoefficientMatrix(self):
rng = np.random.RandomState(0)
for dtype in self.float_types:
a = rng.randn(3, 4).astype(dtype)
b = rng.randn(4, 4).astype(dtype)
with self.test_scope():
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
linalg_ops.matrix_triangular_solve(a, b)
@test_util.run_v2_only # Different error types
@test_util.disable_mlir_bridge("Error handling")
def testWrongDimensionsV2(self):
randn = np.random.RandomState(0).randn
for dtype in self.float_types:
lhs = constant_op.constant(randn(3, 3), dtype=dtype)
rhs = constant_op.constant(randn(4, 3), dtype=dtype)
with self.assertRaises(errors.InvalidArgumentError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
with self.assertRaises(errors.InvalidArgumentError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
@test_util.run_v1_only("Different error types")
@test_util.disable_mlir_bridge("Error handling")
def testWrongDimensionsV1(self):
randn = np.random.RandomState(0).randn
for dtype in self.float_types:
lhs = constant_op.constant(randn(3, 3), dtype=dtype)
rhs = constant_op.constant(randn(4, 3), dtype=dtype)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
if __name__ == "__main__":
test.main()
| apache-2.0 | -6,328,305,133,745,971,000 | 40.281609 | 80 | 0.653766 | false |
awatts/boto | boto/sdb/db/model.py | 152 | 10158 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.sdb.db.property import Property
from boto.sdb.db.key import Key
from boto.sdb.db.query import Query
import boto
from boto.compat import filter
class ModelMeta(type):
"Metaclass for all Models"
def __init__(cls, name, bases, dict):
super(ModelMeta, cls).__init__(name, bases, dict)
# Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!)
cls.__sub_classes__ = []
# Do a delayed import to prevent possible circular import errors.
from boto.sdb.db.manager import get_manager
try:
if filter(lambda b: issubclass(b, Model), bases):
for base in bases:
base.__sub_classes__.append(cls)
cls._manager = get_manager(cls)
# look for all of the Properties and set their names
for key in dict.keys():
if isinstance(dict[key], Property):
property = dict[key]
property.__property_config__(cls, key)
prop_names = []
props = cls.properties()
for prop in props:
if not prop.__class__.__name__.startswith('_'):
prop_names.append(prop.name)
setattr(cls, '_prop_names', prop_names)
except NameError:
# 'Model' isn't defined yet, meaning we're looking at our own
# Model class, defined below.
pass
class Model(object):
__metaclass__ = ModelMeta
__consistent__ = False # Consistent is set off by default
id = None
@classmethod
def get_lineage(cls):
l = [c.__name__ for c in cls.mro()]
l.reverse()
return '.'.join(l)
@classmethod
def kind(cls):
return cls.__name__
@classmethod
def _get_by_id(cls, id, manager=None):
if not manager:
manager = cls._manager
return manager.get_object(cls, id)
@classmethod
def get_by_id(cls, ids=None, parent=None):
if isinstance(ids, list):
objs = [cls._get_by_id(id) for id in ids]
return objs
else:
return cls._get_by_id(ids)
get_by_ids = get_by_id
@classmethod
def get_by_key_name(cls, key_names, parent=None):
raise NotImplementedError("Key Names are not currently supported")
@classmethod
def find(cls, limit=None, next_token=None, **params):
q = Query(cls, limit=limit, next_token=next_token)
for key, value in params.items():
q.filter('%s =' % key, value)
return q
@classmethod
def all(cls, limit=None, next_token=None):
return cls.find(limit=limit, next_token=next_token)
@classmethod
    def get_or_insert(cls, key_name, **kw):
raise NotImplementedError("get_or_insert not currently supported")
@classmethod
def properties(cls, hidden=True):
properties = []
while cls:
for key in cls.__dict__.keys():
prop = cls.__dict__[key]
if isinstance(prop, Property):
if hidden or not prop.__class__.__name__.startswith('_'):
properties.append(prop)
if len(cls.__bases__) > 0:
cls = cls.__bases__[0]
else:
cls = None
return properties
@classmethod
def find_property(cls, prop_name):
property = None
while cls:
for key in cls.__dict__.keys():
prop = cls.__dict__[key]
if isinstance(prop, Property):
if not prop.__class__.__name__.startswith('_') and prop_name == prop.name:
property = prop
if len(cls.__bases__) > 0:
cls = cls.__bases__[0]
else:
cls = None
return property
@classmethod
def get_xmlmanager(cls):
if not hasattr(cls, '_xmlmanager'):
from boto.sdb.db.manager.xmlmanager import XMLManager
cls._xmlmanager = XMLManager(cls, None, None, None,
None, None, None, None, False)
return cls._xmlmanager
@classmethod
def from_xml(cls, fp):
xmlmanager = cls.get_xmlmanager()
return xmlmanager.unmarshal_object(fp)
def __init__(self, id=None, **kw):
self._loaded = False
# first try to initialize all properties to their default values
for prop in self.properties(hidden=False):
try:
setattr(self, prop.name, prop.default_value())
except ValueError:
pass
if 'manager' in kw:
self._manager = kw['manager']
self.id = id
for key in kw:
if key != 'manager':
# We don't want any errors populating up when loading an object,
# so if it fails we just revert to it's default value
try:
setattr(self, key, kw[key])
except Exception as e:
boto.log.exception(e)
def __repr__(self):
return '%s<%s>' % (self.__class__.__name__, self.id)
def __str__(self):
return str(self.id)
def __eq__(self, other):
return other and isinstance(other, Model) and self.id == other.id
def _get_raw_item(self):
return self._manager.get_raw_item(self)
def load(self):
if self.id and not self._loaded:
self._manager.load_object(self)
def reload(self):
if self.id:
self._loaded = False
self._manager.load_object(self)
def put(self, expected_value=None):
"""
Save this object as it is, with an optional expected value
        :param expected_value: Optional tuple of (attribute, value) that
            must be the same in order to save this object. If this
            condition is not met, an SDBResponseError will be raised with a
            Conflict status code.
:type expected_value: tuple or list
:return: This object
:rtype: :class:`boto.sdb.db.model.Model`
"""
self._manager.save_object(self, expected_value)
return self
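    # Illustrative conditional save (the attribute name/value pair below is
    # hypothetical): obj.put(expected_value=('state', 'pending')) persists
    # the object only if the stored 'state' attribute still equals 'pending'.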
save = put
def put_attributes(self, attrs):
"""
Save just these few attributes, not the whole object
:param attrs: Attributes to save, key->value dict
:type attrs: dict
:return: self
:rtype: :class:`boto.sdb.db.model.Model`
"""
assert(isinstance(attrs, dict)), "Argument must be a dict of key->values to save"
for prop_name in attrs:
value = attrs[prop_name]
prop = self.find_property(prop_name)
assert(prop), "Property not found: %s" % prop_name
self._manager.set_property(prop, self, prop_name, value)
self.reload()
return self
def delete_attributes(self, attrs):
"""
Delete just these attributes, not the whole object.
:param attrs: Attributes to save, as a list of string names
:type attrs: list
:return: self
:rtype: :class:`boto.sdb.db.model.Model`
"""
assert(isinstance(attrs, list)), "Argument must be a list of names of keys to delete."
self._manager.domain.delete_attributes(self.id, attrs)
self.reload()
return self
save_attributes = put_attributes
def delete(self):
self._manager.delete_object(self)
def key(self):
return Key(obj=self)
def set_manager(self, manager):
self._manager = manager
def to_dict(self):
props = {}
for prop in self.properties(hidden=False):
props[prop.name] = getattr(self, prop.name)
obj = {'properties': props,
'id': self.id}
return {self.__class__.__name__: obj}
def to_xml(self, doc=None):
xmlmanager = self.get_xmlmanager()
doc = xmlmanager.marshal_object(self, doc)
return doc
@classmethod
def find_subclass(cls, name):
"""Find a subclass with a given name"""
if name == cls.__name__:
return cls
for sc in cls.__sub_classes__:
r = sc.find_subclass(name)
if r is not None:
return r
class Expando(Model):
def __setattr__(self, name, value):
if name in self._prop_names:
object.__setattr__(self, name, value)
elif name.startswith('_'):
object.__setattr__(self, name, value)
elif name == 'id':
object.__setattr__(self, name, value)
else:
self._manager.set_key_value(self, name, value)
object.__setattr__(self, name, value)
def __getattr__(self, name):
if not name.startswith('_'):
value = self._manager.get_key_value(self, name)
if value:
object.__setattr__(self, name, value)
return value
raise AttributeError
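# Illustrative Expando usage (hypothetical names, not part of the original
# module):
#     class Note(Expando):
#         pass
#     n = Note()
#     n.mood = 'happy'  # undeclared attributes are persisted as key/values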
| mit | 7,552,580,929,164,204,000 | 33.317568 | 95 | 0.570388 | false |
guewen/odoo | addons/account/project/wizard/account_analytic_journal_report.py | 378 | 3164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_journal_report(osv.osv_memory):
_name = 'account.analytic.journal.report'
_description = 'Account Analytic Journal'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
'analytic_account_journal_id': fields.many2many('account.analytic.journal', 'account_analytic_journal_name', 'journal_line_id', 'journal_print_id', 'Analytic Journals', required=True),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
ids_list = []
if context.get('active_id',False):
ids_list.append(context.get('active_id',False))
else:
record = self.browse(cr,uid,ids[0],context=context)
for analytic_record in record.analytic_account_journal_id:
ids_list.append(analytic_record.id)
datas = {
'ids': ids_list,
'model': 'account.analytic.journal',
'form': data
}
context2 = context.copy()
context2['active_model'] = 'account.analytic.journal'
context2['active_ids'] = ids_list
return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticjournal', data=datas, context=context2)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(account_analytic_journal_report, self).default_get(cr, uid, fields, context=context)
        if 'active_ids' not in context:
journal_ids = self.pool.get('account.analytic.journal').search(cr, uid, [], context=context)
else:
journal_ids = context.get('active_ids')
if 'analytic_account_journal_id' in fields:
res.update({'analytic_account_journal_id': journal_ids})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,971,742,572,881,555,000 | 41.756757 | 192 | 0.606195 | false |
3dfxsoftware/cbss-addons | vauxoo_reports/sale_order_report/report/amd_computadoras_sale_report.py | 1 | 1740 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
from osv import osv
from tools.translate import _
from report import pyPdf
class amd_computadoras_sale(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(amd_computadoras_sale, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'hello': self._hello,
})
    def _hello(self, p):
        print "entering _hello"  # translated; the original Spanish said "estoy en hello"
        output = pyPdf.PdfFileWriter()
        print output
        return "Hello World %s" % output
report_sxw.report_sxw(
'report.sale_m321_c_report',
'sale.order',
'addons/sale_order_report/report/amd_computadoras_sale_report.rml',
parser=amd_computadoras_sale)
| gpl-2.0 | -300,955,707,202,593,860 | 37.666667 | 83 | 0.623563 | false |
webu/django-cms | cms/test_utils/project/emailuserapp/forms.py | 61 | 3574 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import EmailUser
class UserCreationForm(forms.ModelForm):
"""
A form for creating a new user, including the required
email and password fields.
"""
error_messages = {
'duplicate_email': "A user with that email already exists.",
'password_mismatch': "The two password fields didn't match.",
}
email = forms.EmailField(
label='Email',
help_text="Required. Standard format email address.",
)
password1 = forms.CharField(
label='Password',
widget=forms.PasswordInput
)
password2 = forms.CharField(
label='Password confirmation',
widget=forms.PasswordInput,
help_text="Enter the same password as above, for verification."
)
class Meta:
model = EmailUser
fields = ('email',)
def clean_email(self):
        # Since EmailUser.email is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
email = self.cleaned_data["email"]
User = get_user_model()
try:
User._default_manager.get(email=email)
except User.DoesNotExist:
return email
raise forms.ValidationError(
self.error_messages['duplicate_email'],
code='duplicate_email',
)
def clean_password2(self):
# check that the two passwords match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""
A form for updating users, including all fields on the user,
but replaces the password field with admin's password hash display
field.
"""
email = forms.EmailField(
label='Email',
help_text = "Required. Standard format email address.",
)
password = ReadOnlyPasswordHashField(label="Password",
help_text="Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>.")
class Meta:
model = EmailUser
fields = ('email', 'password', 'first_name', 'last_name', 'is_active',
'is_staff', 'is_superuser', 'groups', 'user_permissions', 'last_login',
'date_joined')
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
"""
Regardless of what the user provides, return the initial value.
This is done here, rather than on the field, because the
        field does not have access to the initial value.
"""
return self.initial["password"]
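
# Illustrative usage of the forms above (hypothetical view code, not part of
# this module; field values are examples only):
#   form = UserCreationForm(data={'email': '[email protected]',
#                                 'password1': 's3cret', 'password2': 's3cret'})
#   if form.is_valid():
#       user = form.save()  # the password is stored hashed via set_password()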
| bsd-3-clause | 6,759,825,045,499,173,000 | 31.198198 | 83 | 0.612759 | false |
Fafou/Sick-Beard | sickbeard/clients/requests/packages/urllib3/contrib/ntlmpool.py | 262 | 4740 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
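
# Illustrative usage (hypothetical host, credentials and paths; not part of
# this module):
#   pool = NTLMConnectionPool('MYDOMAIN\\alice', 's3cret', authurl='/auth',
#                             host='intranet.example.com', port=443)
#   response = pool.urlopen('GET', '/protected/page')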
| gpl-3.0 | -624,329,888,205,280,000 | 38.5 | 77 | 0.563924 | false |
dpayne9000/Rubixz-Coin | qa/rpc-tests/test_script_address2.py | 1 | 4079 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Rubixzcoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, []))
self.nodes.append(start_node(2, self.options.tmpdir, []))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
block = self.nodes[1].generate(3)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")
assert_equal(multisig_addr_new, "QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe")
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
## Let's send to the old address. We can then find it in the
## new address with the new client. So basically the old
## address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
block = self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
| mit | 9,043,095,281,890,640,000 | 39.79 | 93 | 0.633734 | false |
sightmachine/simplecv2-facerecognizer | examples/mustachinator.py | 1 | 1560 | #!/usr/bin/python
from operator import add
from simplecv.core.camera import Camera
from simplecv.display import Display
from simplecv.factory import Factory
cam = Camera()
display = Display((800,600))
counter = 0
# load the cascades
face_cascade = HaarCascade("face")
nose_cascade = HaarCascade("nose")
stache = Image("stache.png", sample=True) # load the stache
mask = stache.createAlphaMask() # load the stache mask
count = 0
while display.isNotDone():
img = cam.getImage()
img = img.scale(.5) #use a smaller image
faces = img.find_haar_features(face_cascade) #find faces
if( faces is not None ): # if we have a face
faces = faces.sort_area() #get the biggest one
face = faces[-1]
myFace = face.crop() # get the face image
noses = myFace.find_haar_features(nose_cascade) #find the nose
if( noses is not None ):# if we have a nose
noses = noses.sort_area()
nose = noses[0] # get the biggest
# these get the upper left corner of the face/nose with respect to original image
xf = face.x -(face.get_width()/2)
yf = face.y -(face.get_height()/2)
xm = nose.x -(nose.get_width()/2)
ym = nose.y -(nose.get_height()/2)
#calculate the mustache position
xmust = xf+xm-(stache.width/2)+(nose.get_width()/2)
ymust = yf+ym+(2*nose.get_height()/3)
#blit the stache/mask onto the image
img = img.blit(stache,pos=(xmust,ymust),mask = mask)
img.save(display) #display
| bsd-2-clause | -9,220,633,982,461,406,000 | 37.04878 | 93 | 0.624359 | false |
UoK-Psychology/Openethics | root/tests/views.py | 1 | 5023 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from mock import patch, call
from ethicsapplication.models import EthicsApplication
class IndexViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'testpass')
self.user.save()
def test_user_not_logged_in(self):
'''
If a non logged in user does a get request to the index url
they should sent directly to the index page
'''
response = self.client.get(reverse('index_view'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'index.html')
#assert context
self.assertFalse('active_applications' in response.context)
def test_user_is_logged_in_has_active_applications(self):
'''
If a user is logged in then the context should include:
        active_applications: applications that the user is the pi for and are in the state *with_researcher*
        applications_in_review: applications that the user is the pi for and are in the state *awaiting_approval*
        approved_applications: applications that the user is the pi for and are in the state *approved*
        These lists are generated using the get_applications_for_principle_investigator function,
        passing in the specific state filter. That function is mocked in this test.
'''
'''
        Below we mock get_applications_for_principle_investigator() and hard-wire what
        it should return before each call. This means the test can run as a unit test
        in isolation from EthicsApplicationManager.
'''
with patch('ethicsapplication.models.EthicsApplicationManager.get_applications_for_principle_investigator') as manager_mock:
manager_mock.return_value = [] #set what value we want the call the get_active_applicaitons() to return below..
#have a user, and be logged in
#get request to the index page
self.client.login(username='test', password='testpass')
            response = self.client.get(reverse('index_view')) # the context returned by index_view
                                                              # will include the mocked results.
#assert 200
#assert the template
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'index.html')
#assert context
self.assertEqual(response.context['active_applications'], [])
self.assertEqual(response.context['applications_in_review'], [])
self.assertEqual(response.context['approved_applications'], [])
#assert that manager_mock is called
self.assertTrue(call(self.user, 'with_researcher') in manager_mock.mock_calls)
self.assertTrue(call(self.user, 'awaiting_approval') in manager_mock.mock_calls)
self.assertTrue(call(self.user, 'approved') in manager_mock.mock_calls)
def test_user_is_logged_in_has_applications_for_review(self):
'''
If the user has got applications that they are the reviewer for, which are in the state
'*awaiting approval* then they should
be listed in the context as applications_for_review
'''
with patch('ethicsapplication.models.EthicsApplicationManager.get_applications_for_reviewer') as manager_mock:
application_for_review = EthicsApplication.objects.create(title='test', principle_investigator=self.user)
manager_mock.return_value = [application_for_review] #set what value we want the call the get_active_applicaitons() to return below..
#have a user, and be logged in
#get request to the index page
self.client.login(username='test', password='testpass')
            response = self.client.get(reverse('index_view')) # the context returned by index_view
                                                              # will include the mocked results.
#assert 200
#assert the template
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'index.html')
#assert context
self.assertTrue('applications_for_review' in response.context)
self.assertEqual(response.context['applications_for_review'], [application_for_review] )
#assert that manager_mock is called
manager_mock.assert_called_with(self.user, 'awaiting_approval')
| gpl-3.0 | -5,101,681,904,553,390,000 | 50.265306 | 150 | 0.611985 | false |
fabada/pootle | pootle/apps/pootle_app/project_tree.py | 5 | 16115 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import errno
import logging
import os
import re
from django.conf import settings
from pootle.core.log import store_log, STORE_RESURRECTED
from pootle.core.utils.timezone import datetime_min
from pootle_app.models.directory import Directory
from pootle_language.models import Language
from pootle_store.models import Store
from pootle_store.util import absolute_real_path, relative_real_path
#: Case insensitive match for language codes
LANGCODE_RE = re.compile('^[a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?$',
re.IGNORECASE)
#: Case insensitive match for language codes as postfix
LANGCODE_POSTFIX_RE = re.compile('^.*?[-_.]([a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?)$',
re.IGNORECASE)
def direct_language_match_filename(language_code, path_name):
name, ext = os.path.splitext(os.path.basename(path_name))
if name == language_code or name.lower() == language_code.lower():
return True
# Check file doesn't match another language.
if Language.objects.filter(code__iexact=name).count():
return False
detect = LANGCODE_POSTFIX_RE.split(name)
return (len(detect) > 1 and
(detect[1] == language_code or
detect[1].lower() == language_code.lower()))
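
# Illustrative behaviour (assuming no clashing Language rows in the database):
#   direct_language_match_filename('fr', 'po/fr.po')         -> True
#   direct_language_match_filename('fr', 'po/project-fr.po') -> True
#   direct_language_match_filename('fr', 'po/project.po')    -> False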
def match_template_filename(project, filename):
"""Test if :param:`filename` might point at a template file for a given
:param:`project`.
"""
name, ext = os.path.splitext(os.path.basename(filename))
#FIXME: is the test for matching extension redundant?
if ext == os.path.extsep + project.get_template_filetype():
if ext != os.path.extsep + project.localfiletype:
# Template extension is distinct, surely file is a template.
return True
elif not find_lang_postfix(filename):
# File name can't possibly match any language, assume it is a
# template.
return True
return False
def get_matching_language_dirs(project_dir, language):
return [lang_dir for lang_dir in os.listdir(project_dir)
if language.code == lang_dir]
def get_non_existant_language_dir(project_dir, language, file_style, make_dirs):
if file_style == "gnu":
return project_dir
elif make_dirs:
language_dir = os.path.join(project_dir, language.code)
os.mkdir(language_dir)
return language_dir
else:
raise IndexError("Directory not found for language %s, project %s" %
(language.code, project_dir))
def get_or_make_language_dir(project_dir, language, file_style, make_dirs):
matching_language_dirs = get_matching_language_dirs(project_dir, language)
if len(matching_language_dirs) == 0:
# If no matching directories can be found, check if it is a GNU-style
# project.
return get_non_existant_language_dir(project_dir, language, file_style,
make_dirs)
else:
return os.path.join(project_dir, matching_language_dirs[0])
def get_language_dir(project_dir, language, file_style, make_dirs):
language_dir = os.path.join(project_dir, language.code)
if not os.path.exists(language_dir):
return get_or_make_language_dir(project_dir, language, file_style,
make_dirs)
else:
return language_dir
def get_translation_project_dir(language, project_dir, file_style,
make_dirs=False):
"""Returns the base directory containing translations files for the
project.
:param make_dirs: if ``True``, project and language directories will be
created as necessary.
"""
if file_style == 'gnu':
return project_dir
else:
return get_language_dir(project_dir, language, file_style, make_dirs)
def is_hidden_file(path):
return path[0] == '.'
def split_files_and_dirs(ignored_files, ext, real_dir, file_filter):
files = []
dirs = []
for child_path in [child_path for child_path in os.listdir(real_dir)
if child_path not in ignored_files and
not is_hidden_file(child_path)]:
full_child_path = os.path.join(real_dir, child_path)
if (os.path.isfile(full_child_path) and
full_child_path.endswith(ext) and file_filter(full_child_path)):
files.append(child_path)
elif os.path.isdir(full_child_path):
dirs.append(child_path)
return files, dirs
def add_items(fs_items_set, db_items, create_or_resurrect_db_item, parent):
"""Add/make obsolete the database items to correspond to the filesystem.
:param fs_items_set: items (dirs, files) currently in the filesystem
:param db_items: dict (name, item) of items (dirs, stores) currently in the
database
    :param create_or_resurrect_db_item: callable that will create a new db item
        or resurrect an obsolete db item with a given name and parent.
    :param parent: parent db directory for the items
:return: list of all items, list of newly added items
:rtype: tuple
"""
items = []
new_items = []
db_items_set = set(db_items)
items_to_delete = db_items_set - fs_items_set
items_to_create = fs_items_set - db_items_set
for name in items_to_delete:
db_items[name].makeobsolete()
if len(items_to_delete) > 0:
parent.update_all_cache()
for vfolder_treeitem in parent.vfolder_treeitems:
vfolder_treeitem.update_all_cache()
for name in db_items_set - items_to_delete:
items.append(db_items[name])
for name in items_to_create:
item = create_or_resurrect_db_item(name)
items.append(item)
new_items.append(item)
try:
item.save()
except Exception:
logging.exception('Error while adding %s', item)
return items, new_items
def create_or_resurrect_store(file, parent, name, translation_project):
"""Create or resurrect a store db item with given name and parent."""
try:
store = Store.objects.get(parent=parent, name=name)
store.obsolete = False
store.file_mtime = datetime_min
if store.last_sync_revision is None:
store.last_sync_revision = store.get_max_unit_revision()
store_log(user='system', action=STORE_RESURRECTED,
path=store.pootle_path, store=store.id)
except Store.DoesNotExist:
store = Store(file=file, parent=parent,
name=name, translation_project=translation_project)
store.mark_all_dirty()
return store
def create_or_resurrect_dir(name, parent):
"""Create or resurrect a directory db item with given name and parent."""
try:
dir = Directory.objects.get(parent=parent, name=name)
dir.obsolete = False
except Directory.DoesNotExist:
dir = Directory(name=name, parent=parent)
dir.mark_all_dirty()
return dir
# TODO: rename function or even rewrite it
def add_files(translation_project, ignored_files, ext, relative_dir, db_dir,
file_filter=lambda _x: True):
podir_path = to_podir_path(relative_dir)
files, dirs = split_files_and_dirs(ignored_files, ext, podir_path,
file_filter)
file_set = set(files)
dir_set = set(dirs)
existing_stores = dict((store.name, store) for store in
db_dir.child_stores.live().exclude(file='')
.iterator())
existing_dirs = dict((dir.name, dir) for dir in
db_dir.child_dirs.live().iterator())
files, new_files = add_items(
file_set,
existing_stores,
lambda name: create_or_resurrect_store(
file=os.path.join(relative_dir, name),
parent=db_dir,
name=name,
translation_project=translation_project,
),
db_dir,
)
db_subdirs, new_db_subdirs = add_items(
dir_set,
existing_dirs,
lambda name: create_or_resurrect_dir(name=name, parent=db_dir),
db_dir,
)
is_empty = len(files) == 0
for db_subdir in db_subdirs:
fs_subdir = os.path.join(relative_dir, db_subdir.name)
_files, _new_files, _is_empty = \
add_files(translation_project, ignored_files, ext, fs_subdir,
db_subdir, file_filter)
files += _files
new_files += _new_files
is_empty &= _is_empty
if is_empty:
db_dir.makeobsolete()
return files, new_files, is_empty
def to_podir_path(path):
path = relative_real_path(path)
return os.path.join(settings.POOTLE_TRANSLATION_DIRECTORY, path)
def find_lang_postfix(filename):
"""Finds the language code at end of a filename."""
name = os.path.splitext(os.path.basename(filename))[0]
if LANGCODE_RE.match(name):
return name
match = LANGCODE_POSTFIX_RE.match(name)
if match:
return match.groups()[0]
for code in Language.objects.values_list('code', flat=True):
if (name.endswith('-'+code) or name.endswith('_'+code) or
name.endswith('.'+code) or
name.lower().endswith('-'+code.lower()) or
            name.lower().endswith('_'+code.lower()) or
            name.lower().endswith('.'+code.lower())):
return code
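
# Illustrative behaviour:
#   find_lang_postfix('fr.po')         -> 'fr'
#   find_lang_postfix('project-fr.po') -> 'fr'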
def translation_project_should_exist(language, project):
"""Tests if there are translation files corresponding to the given
:param:`language` and :param:`project`.
"""
if project.get_treestyle() == "gnu":
# GNU style projects are tricky
if language.code == 'templates':
# Language is template look for template files
for dirpath, dirnames, filenames in os.walk(project.get_real_path()):
for filename in filenames:
if project.file_belongs_to_project(filename, match_templates=True) and \
match_template_filename(project, filename):
return True
else:
# find files with the language name in the project dir
for dirpath, dirnames, filenames in os.walk(project.get_real_path()):
for filename in filenames:
#FIXME: don't reuse already used file
if project.file_belongs_to_project(filename, match_templates=False) and \
direct_language_match_filename(language.code, filename):
return True
else:
# find directory with the language name in the project dir
try:
dirpath, dirnames, filename = os.walk(project.get_real_path()).next()
if language.code in dirnames:
return True
except StopIteration:
pass
return False
def init_store_from_template(translation_project, template_store):
"""Initialize a new file for `translation_project` using `template_store`.
"""
if translation_project.file_style == 'gnu':
target_pootle_path, target_path = get_translated_name_gnu(translation_project,
template_store)
else:
target_pootle_path, target_path = get_translated_name(translation_project,
template_store)
# Create the missing directories for the new TP.
target_dir = os.path.dirname(target_path)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
output_file = template_store.file.store
output_file.settargetlanguage(translation_project.language.code)
output_file.savefile(target_path)
def get_translated_name_gnu(translation_project, store):
"""Given a template :param:`store` and a :param:`translation_project` return
target filename.
"""
pootle_path_parts = store.pootle_path.split('/')
pootle_path_parts[1] = translation_project.language.code
pootle_path = '/'.join(pootle_path_parts[:-1])
if not pootle_path.endswith('/'):
pootle_path = pootle_path + '/'
suffix = translation_project.language.code + os.extsep + \
translation_project.project.localfiletype
# try loading file first
try:
target_store = translation_project.stores.live().get(
parent__pootle_path=pootle_path,
name__iexact=suffix,
)
return (target_store.pootle_path,
target_store.file and target_store.file.path)
except Store.DoesNotExist:
target_store = None
# is this GNU-style with prefix?
use_prefix = (store.parent.child_stores.live().exclude(file="").count() > 1 or
translation_project.stores.live().exclude(name__iexact=suffix,
file='').count())
if not use_prefix:
# let's make sure
for tp in translation_project.project.translationproject_set.exclude(language__code='templates').iterator():
temp_suffix = tp.language.code + os.extsep + translation_project.project.localfiletype
if tp.stores.live().exclude(name__iexact=temp_suffix).exclude(file="").count():
use_prefix = True
break
if use_prefix:
if store.translation_project.language.code == 'templates':
tprefix = os.path.splitext(store.name)[0]
#FIXME: we should detect separator
prefix = tprefix + '-'
else:
prefix = os.path.splitext(store.name)[0][:-len(store.translation_project.language.code)]
tprefix = prefix[:-1]
try:
target_store = translation_project.stores.live().filter(
parent__pootle_path=pootle_path,
name__in=[
tprefix + '-' + suffix,
tprefix + '_' + suffix,
tprefix + '.' + suffix,
tprefix + '-' + suffix.lower(),
tprefix + '_' + suffix.lower(),
tprefix + '.' + suffix.lower(),
],
)[0]
return (target_store.pootle_path,
target_store.file and target_store.file.path)
except (Store.DoesNotExist, IndexError):
pass
else:
prefix = ""
if store.file:
path_parts = store.file.path.split(os.sep)
name = prefix + suffix
path_parts[-1] = name
pootle_path_parts[-1] = name
else:
path_parts = store.parent.get_real_path().split(os.sep)
path_parts.append(store.name)
return '/'.join(pootle_path_parts), os.sep.join(path_parts)
def get_translated_name(translation_project, store):
name, ext = os.path.splitext(store.name)
if store.file:
path_parts = store.file.name.split(os.sep)
else:
path_parts = store.parent.get_real_path().split(os.sep)
path_parts.append(store.name)
pootle_path_parts = store.pootle_path.split('/')
# Replace language code
path_parts[1] = translation_project.language.code
pootle_path_parts[1] = translation_project.language.code
# Replace extension
path_parts[-1] = name + '.' + translation_project.project.localfiletype
pootle_path_parts[-1] = name + '.' + \
translation_project.project.localfiletype
return ('/'.join(pootle_path_parts),
absolute_real_path(os.sep.join(path_parts)))
def does_not_exist(path):
if os.path.exists(path):
return False
try:
os.stat(path)
        # stat() succeeded even though os.path.exists() returned False
except OSError as e:
if e.errno == errno.ENOENT:
# explicit no such file or directory
return True
| gpl-3.0 | -8,047,285,457,306,035,000 | 35.132287 | 116 | 0.60422 | false |
NMGRL/pychron | pychron/mv/diode_locator.py | 2 | 1121 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.mv.locator import Locator
class DiodeLocator(Locator):
pass
# ============= EOF =============================================
| apache-2.0 | 2,390,733,933,697,944,000 | 37.655172 | 81 | 0.530776 | false |
StudTeam6/competition | sw/ground_segment/python/udp_link/datalink_to_w5100.py | 89 | 1441 | #!/usr/bin/python
import os
import sys
import socket
import struct
from optparse import OptionParser
sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/lib/python")
parser = OptionParser()
parser.add_option("-d", "--destip", dest="dest_addr", help="Destination IP for messages picked up from local socket", default="192.168.25.47")
parser.add_option("-p", "--destport", dest="dest_port", default=1234, help="Destination UDP port to send messages to")
parser.add_option("-l", "--localport", dest="local_port", default=4243, help="Local port to listen to for UDP messages")
(options, args) = parser.parse_args()
msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
msock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
msock.bind(("", int(options.local_port)))
# mreq = struct.pack("4sl", socket.inet_aton(telemip), socket.INADDR_ANY)
# msock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# initialize a socket, think of it as a cable
# SOCK_DGRAM specifies that this is UDP
destsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
while True:
    data = None
    try:
        data, addr = msock.recvfrom(1024)
        fmt = 'B' * len(data)
        strdata = struct.unpack(fmt, data)
        print len(strdata), ":", strdata
        # send the command
        destsock.sendto(data, (options.dest_addr, int(options.dest_port)))
except socket.error, e:
print 'Exception', e
| gpl-2.0 | 1,211,181,697,388,336,400 | 32.511628 | 142 | 0.696738 | false |
idjaw/cliff | cliff/formatters/base.py | 3 | 1507 | """Base classes for formatters.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Formatter(object):
@abc.abstractmethod
def add_argument_group(self, parser):
"""Add any options to the argument parser.
Should use our own argument group.
"""
@six.add_metaclass(abc.ABCMeta)
class ListFormatter(Formatter):
"""Base class for formatters that know how to deal with multiple objects.
"""
@abc.abstractmethod
def emit_list(self, column_names, data, stdout, parsed_args):
"""Format and print the list from the iterable data source.
:param column_names: names of the columns
:param data: iterable data source, one tuple per object
with values in order of column names
:param stdout: output stream where data should be written
:param parsed_args: argparse namespace from our local options
"""
@six.add_metaclass(abc.ABCMeta)
class SingleFormatter(Formatter):
"""Base class for formatters that work with single objects.
"""
@abc.abstractmethod
def emit_one(self, column_names, data, stdout, parsed_args):
"""Format and print the values associated with the single object.
:param column_names: names of the columns
:param data: iterable data source with values in order of column names
:param stdout: output stream where data should be written
:param parsed_args: argparse namespace from our local options
"""
| apache-2.0 | 6,276,743,716,660,493,000 | 29.14 | 78 | 0.676841 | false |
coll-gate/collgate | server/main/language.py | 1 | 4780 | # -*- coding: utf-8; -*-
#
# @file language.py
# @brief Views related to the language type.
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2016-09-01
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
from django.core.exceptions import SuspiciousOperation
from django.shortcuts import get_object_or_404
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from igdectk.rest.handler import *
from igdectk.rest.response import HttpResponseRest
from main.cache import cache_manager
from messenger.cache import client_cache_manager
from .models import InterfaceLanguages, Language
from .main import RestMain
class RestLanguage(RestMain):
regex = r'^language/$'
suffix = 'language'
class RestLanguageCode(RestLanguage):
regex = r'^(?P<code>[a-zA-Z]{2}([_-][a-zA-Z]{2})*)/$'
suffix = 'code'
class RestLanguageCodeLabel(RestLanguageCode):
regex = r'^label/$'
suffix = 'label'
class RestUI(RestMain):
regex = r'^ui/$'
suffix = 'ui'
class RestUILanguage(RestUI):
regex = r'^language/$'
suffix = 'language'
@RestLanguage.def_request(Method.GET, Format.JSON)
def get_languages(request):
"""
Get the list of languages for the entities in JSON
"""
lang = translation.get_language()
cache_name = 'languages:%s' % lang
languages = cache_manager.get('main', cache_name)
if languages:
return HttpResponseRest(request, languages)
languages = []
for language in Language.objects.all().order_by('code'):
languages.append({
'id': language.code,
'value': language.code,
'label': language.get_label()
})
# cache for 24h
cache_manager.set('main', cache_name, languages, 60*60*24)
return HttpResponseRest(request, languages)
@RestLanguage.def_admin_request(Method.POST, Format.JSON, content={
"type": "object",
"properties": {
"code": Language.CODE_VALIDATOR,
"label": Language.LABEL_VALIDATOR
},
},
staff=True
)
def post_language(request):
"""
Create an new language for data.
"""
code = request.data['code']
label = request.data['label']
lang = translation.get_language()
language = Language()
language.code = code
language.set_label(lang, request.data['label'])
language.save()
results = {
'id': code,
'value': code,
'label': label
}
return HttpResponseRest(request, results)
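
# Illustrative exchange for the endpoint above (the URL prefix depends on how
# RestMain is mounted; values are examples):
#   POST .../language/ {"code": "fr", "label": "French"}
#   -> {"id": "fr", "value": "fr", "label": "French"}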
@RestLanguageCode.def_admin_request(Method.DELETE, Format.JSON, staff=True)
def delete_language(request, code):
language = get_object_or_404(Language, code=code)
# do we allow delete because of data consistency ?
# it is not really a problem because the code is a standard
language.delete()
return HttpResponseRest(request, {})
@RestLanguageCodeLabel.def_auth_request(Method.GET, Format.JSON)
def get_all_labels_of_language(request, code):
"""
Returns labels for each language related to the user interface.
"""
language = get_object_or_404(Language, code=code)
label_dict = language.label
# complete with missing languages
for lang, lang_label in InterfaceLanguages.choices():
if lang not in label_dict:
label_dict[lang] = ""
results = label_dict
return HttpResponseRest(request, results)
@RestLanguageCodeLabel.def_admin_request(Method.PUT, Format.JSON, content={
"type": "object",
"additionalProperties": Language.LABEL_VALIDATOR
},
staff=True)
def change_language_labels(request, code):
language = get_object_or_404(Language, code=code)
labels = request.data
languages_values = [lang[0] for lang in InterfaceLanguages.choices()]
for lang, label in labels.items():
if lang not in languages_values:
raise SuspiciousOperation(_("Unsupported language identifier"))
language.label = labels
language.save()
result = {
'label': language.get_label()
}
return HttpResponseRest(request, result)
@RestUILanguage.def_request(Method.GET, Format.JSON)
def get_ui_languages(request):
"""
Get the list of languages for the UI in JSON
"""
lang = translation.get_language()
cache_name = 'ui-languages:%s' % lang
results = cache_manager.get('main', cache_name)
if results:
        return HttpResponseRest(request, results)
languages = []
for language in InterfaceLanguages:
languages.append({
'id': language.value,
'value': language.value,
'label': str(language.label)
})
# cache for 24h
    cache_manager.set('main', cache_name, languages, 60*60*24)
return HttpResponseRest(request, languages)
| mit | -7,089,780,795,515,978,000 | 24.015707 | 75 | 0.657807 | false |
PacktPublishing/Mastering-Mesos | Chapter4/Aurora/src/test/python/apache/aurora/common/test_cluster_option.py | 14 | 2562 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import OptionParser
import pytest
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.cluster_option import ClusterOption
from apache.aurora.common.clusters import Clusters
CLUSTER_LIST = Clusters((
Cluster(name='smf1'),
Cluster(name='smf1-test'),
))
def cluster_provider(name):
return CLUSTER_LIST[name]
def test_constructors():
ClusterOption('--test', '-t', help="Test cluster.", clusters=CLUSTER_LIST)
ClusterOption('--test', '-t', help="Test cluster.", cluster_provider=cluster_provider)
with pytest.raises(ValueError):
ClusterOption()
with pytest.raises(ValueError):
ClusterOption('--cluster') # requires clusters=
with pytest.raises(ValueError):
ClusterOption('--cluster', clusters=CLUSTER_LIST, cluster_provider=cluster_provider)
class MockOptionParser(OptionParser):
class Error(Exception): pass
def error(self, msg):
# per optparse documentation:
# Print a usage message incorporating 'msg' to stderr and exit.
# If you override this in a subclass, it should not return -- it
# should either exit or raise an exception.
raise self.Error(msg)
def make_parser():
parser = MockOptionParser()
parser.add_option(ClusterOption('--source_cluster', '-s', clusters=CLUSTER_LIST))
parser.add_option(ClusterOption('--dest_cluster', clusters=CLUSTER_LIST))
parser.add_option(ClusterOption('--cluster', cluster_provider=cluster_provider))
return parser
def test_parsable():
parser = make_parser()
values, _ = parser.parse_args(['--source_cluster=smf1-test', '--cluster=smf1-test'])
assert isinstance(values.source_cluster, Cluster)
assert isinstance(values.cluster, Cluster)
def test_not_parsable():
parser = make_parser()
try:
parser.parse_args(['--source_cluster=borg'])
except MockOptionParser.Error as e:
assert 'borg is not a valid cluster for the --source_cluster option.' in e.args[0]
else:
assert False, 'Expected OptionParser to raise on invalid cluster list.'
| mit | 934,750,406,198,310,000 | 31.025 | 88 | 0.735753 | false |
petewarden/tensorflow | tensorflow/python/data/experimental/ops/prefetching_ops.py | 17 | 11416 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import function
from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.prefetch_to_device")
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.apply(
copy_to_device(target_device=device)).prefetch(buffer_size)
return _apply_fn
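
# Illustrative usage of prefetch_to_device (the device string is an example):
#   dataset = dataset.apply(
#       tf.data.experimental.prefetch_to_device("/gpu:0"))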
@tf_export("data.experimental.copy_to_device")
def copy_to_device(target_device, source_device="/cpu:0"):
"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = False
return _CopyToDeviceDataset(
dataset, target_device=target_device,
source_device=source_device).with_options(options)
return _apply_fn
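
# Illustrative usage of copy_to_device (device strings are examples):
#   dataset = dataset.apply(
#       tf.data.experimental.copy_to_device("/gpu:0"))
#   dataset = dataset.prefetch(1)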
# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate
# all inputs to the Op are in host memory, thereby avoiding some unnecessary
# Sends and Recvs.
class _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that copies elements to another device."""
def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
"""Constructs a _CopyToDeviceDataset.
Args:
input_dataset: `Dataset` to be copied
target_device: The name of the device to which elements would be copied.
source_device: Device where input_dataset would be placed.
"""
self._input_dataset = input_dataset
self._target_device = target_device
spec = framework_device.DeviceSpec().from_string(self._target_device)
self._is_gpu_target = (spec.device_type == "GPU")
self._source_device_string = source_device
self._source_device = ops.convert_to_tensor(source_device)
wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(
self._input_dataset._variant_tensor) # pylint: disable=protected-access
@function.defun()
def _init_func():
"""Creates an iterator for the input dataset.
Returns:
A `string` tensor that encapsulates the iterator created.
"""
ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)
resource = gen_dataset_ops.anonymous_iterator(
**self._input_dataset._flat_structure) # pylint: disable=protected-access
with ops.control_dependencies(
[gen_dataset_ops.make_iterator(ds_variant, resource)]):
return gen_dataset_ops.iterator_to_string_handle(resource)
init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun()
def _remote_init_func():
return functional_ops.remote_call(
target=self._source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access
self._init_captured_args = self._init_func.captured_inputs
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _next_func(string_handle):
"""Calls get_next for created iterator.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
The elements generated from `input_dataset`
"""
with ops.device(self._source_device_string):
iterator = iterator_ops.Iterator.from_string_handle(
string_handle,
dataset_ops.get_legacy_output_types(self),
dataset_ops.get_legacy_output_shapes(self),
dataset_ops.get_legacy_output_classes(self))
return structure.to_tensor_list(self.element_spec, iterator.get_next())
next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun_with_attributes(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
attributes={"experimental_ints_on_device": True})
def _remote_next_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + next_func_concrete.captured_inputs,
Tout=self._input_dataset._flat_types, # pylint: disable=protected-access
f=next_func_concrete)
self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access
self._next_captured_args = self._next_func.captured_inputs
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _finalize_func(string_handle):
"""Destroys the iterator resource created.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
Tensor constant 0
"""
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
string_handle,
**self._input_dataset._flat_structure) # pylint: disable=protected-access
with ops.control_dependencies([
resource_variable_ops.destroy_resource_op(
iterator_resource, ignore_lookup_error=True)]):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func._get_concrete_function_internal( # pylint: disable=protected-access
)
self._finalize_captured_args = self._finalize_func.captured_inputs
g = ops.get_default_graph()
self._init_func.add_to_graph(g)
self._next_func.add_to_graph(g)
self._finalize_func.add_to_graph(g)
    # pylint: enable=protected-access
with ops.device(self._target_device):
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**self._input_dataset._flat_structure) # pylint: disable=protected-access
super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)
# The one_shot_iterator implementation needs a 0 arg _make_dataset function
# that thereby captures all the inputs required to create the dataset. Since
# there are strings that are inputs to the GeneratorDataset which can't be
# placed on a GPU, this fails for the GPU case. Therefore, disabling it for
# GPU
def make_one_shot_iterator(self):
if self._is_gpu_target:
raise ValueError("Cannot create a one shot iterator when using "
"`tf.data.experimental.copy_to_device()` on GPU. Please "
"use `Dataset.make_initializable_iterator()` instead.")
else:
return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
class _MapOnGpuDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over elements in its using a GPU."""
def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
defun_kwargs={"experimental_ints_on_device": True})
variant_tensor = ged_ops.experimental_map_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
use_inter_op_parallelism=self._use_inter_op_parallelism,
**self._flat_structure)
super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._map_func.output_structure
def _transformation_name(self):
return "map_on_gpu()"
def map_on_gpu(map_func):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs
`map_func` on GPU. It must be used after applying the
`tf.data.experimental.copy_to_device` transformation with a GPU device
argument.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _MapOnGpuDataset(dataset, map_func)
return _apply_fn
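
# Illustrative usage of map_on_gpu (must follow copy_to_device with a GPU
# target device):
#   dataset = dataset.apply(
#       tf.data.experimental.copy_to_device("/gpu:0"))
#   dataset = dataset.apply(map_on_gpu(lambda x: x + 1))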
| apache-2.0 | -3,976,324,286,849,288,700 | 39.626335 | 116 | 0.698318 | false |
ctismer/pyfilesystem | fs/contrib/tahoelafs/__init__.py | 14 | 15472 | '''
fs.contrib.tahoelafs
====================
This modules provides a PyFilesystem interface to the Tahoe Least Authority
File System. Tahoe-LAFS is a distributed, encrypted, fault-tolerant storage
system:
http://tahoe-lafs.org/
You will need access to a Tahoe-LAFS "web api" service.
Example (it will use the publicly available (but slow) Tahoe-LAFS cloud)::
from fs.contrib.tahoelafs import TahoeLAFS, Connection
dircap = TahoeLAFS.createdircap(webapi='http://insecure.tahoe-lafs.org')
print "Your dircap (unique key to your storage directory) is", dircap
print "Keep it safe!"
fs = TahoeLAFS(dircap, autorun=False, webapi='http://insecure.tahoe-lafs.org')
f = fs.open("foo.txt", "a")
f.write('bar!')
f.close()
print "Now visit %s and enjoy :-)" % fs.getpathurl('foo.txt')
When any problem occurs, you can turn on internal debugging messages::
import logging
l = logging.getLogger()
l.setLevel(logging.DEBUG)
l.addHandler(logging.StreamHandler(sys.stdout))
... your Python code using TahoeLAFS ...
TODO:
* unicode support
* try network errors / bad happiness
* exceptions
* tests
* sanitize all path types (., /)
* support for extra large file uploads (poster module)
* Possibility to block write until upload done (Tahoe mailing list)
* Report something sane when Tahoe crashed/unavailable
* solve failed unit tests (makedir_winner, ...)
* file times
* docs & author
* python3 support
* remove creating blank files (depends on FileUploadManager)
TODO (Not TahoeLAFS specific tasks):
* RemoteFileBuffer on the fly buffering support
* RemoteFileBuffer unit tests
* RemoteFileBuffer submit to trunk
* Implement FileUploadManager + faking isfile/exists of just processing file
* pyfilesystem docs is outdated (rename, movedir, ...)
'''
import stat as statinfo
import logging
from logging import DEBUG, INFO, ERROR, CRITICAL
import fs
import fs.errors as errors
from fs.path import abspath, relpath, normpath, dirname, pathjoin
from fs.base import FS, NullFile
from fs import _thread_synchronize_default, SEEK_END
from fs.remote import CacheFSMixin, RemoteFileBuffer
from fs.base import fnmatch, NoDefaultMeta
from util import TahoeUtil
from connection import Connection
from six import b
logger = fs.getLogger('fs.tahoelafs')
def _fix_path(func):
"""Method decorator for automatically normalising paths."""
def wrapper(self, *args, **kwds):
if len(args):
args = list(args)
args[0] = _fixpath(args[0])
return func(self, *args, **kwds)
return wrapper
def _fixpath(path):
"""Normalize the given path."""
return abspath(normpath(path))
class _TahoeLAFS(FS):
"""FS providing raw access to a Tahoe-LAFS Filesystem.
This class implements all the details of interacting with a Tahoe-backed
filesystem, but you probably don't want to use it in practice. Use the
TahoeLAFS class instead, which has some internal caching to improve
performance.
"""
_meta = { 'virtual' : False,
'read_only' : False,
'unicode_paths' : True,
'case_insensitive_paths' : False,
'network' : True
}
def __init__(self, dircap, largefilesize=10*1024*1024, webapi='http://127.0.0.1:3456'):
        '''Creates an instance of TahoeLAFS.

        :param dircap: special hash allowing the user to work with a TahoeLAFS directory.
        :param largefilesize: create a placeholder file for files larger than this threshold.
            Uploading and processing of large files can take extremely long (many hours),
            so placing this placeholder can help you remember that an upload is in progress.
            Setting this to None will skip creating placeholder files for any uploads.
'''
self.dircap = dircap if not dircap.endswith('/') else dircap[:-1]
self.largefilesize = largefilesize
self.connection = Connection(webapi)
self.tahoeutil = TahoeUtil(webapi)
super(_TahoeLAFS, self).__init__(thread_synchronize=_thread_synchronize_default)
def __str__(self):
return "<TahoeLAFS: %s>" % self.dircap
@classmethod
def createdircap(cls, webapi='http://127.0.0.1:3456'):
return TahoeUtil(webapi).createdircap()
def getmeta(self,meta_name,default=NoDefaultMeta):
if meta_name == "read_only":
return self.dircap.startswith('URI:DIR2-RO')
return super(_TahoeLAFS,self).getmeta(meta_name,default)
@_fix_path
def open(self, path, mode='r', **kwargs):
self._log(INFO, 'Opening file %s in mode %s' % (path, mode))
newfile = False
if not self.exists(path):
if 'w' in mode or 'a' in mode:
newfile = True
else:
self._log(DEBUG, "File %s not found while opening for reads" % path)
raise errors.ResourceNotFoundError(path)
elif self.isdir(path):
self._log(DEBUG, "Path %s is directory, not a file" % path)
raise errors.ResourceInvalidError(path)
elif 'w' in mode:
newfile = True
if newfile:
self._log(DEBUG, 'Creating empty file %s' % path)
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
self.setcontents(path, b(''))
handler = NullFile()
else:
self._log(DEBUG, 'Opening existing file %s for reading' % path)
handler = self.getrange(path,0)
return RemoteFileBuffer(self, path, mode, handler,
write_on_flush=False)
@_fix_path
def desc(self, path):
try:
return self.getinfo(path)
except:
return ''
@_fix_path
def exists(self, path):
try:
self.getinfo(path)
self._log(DEBUG, "Path %s exists" % path)
return True
except errors.ResourceNotFoundError:
self._log(DEBUG, "Path %s does not exists" % path)
return False
except errors.ResourceInvalidError:
self._log(DEBUG, "Path %s does not exists, probably misspelled URI" % path)
return False
@_fix_path
def getsize(self, path):
try:
size = self.getinfo(path)['size']
self._log(DEBUG, "Size of %s is %d" % (path, size))
return size
except errors.ResourceNotFoundError:
return 0
@_fix_path
def isfile(self, path):
try:
isfile = (self.getinfo(path)['type'] == 'filenode')
except errors.ResourceNotFoundError:
#isfile = not path.endswith('/')
isfile = False
self._log(DEBUG, "Path %s is file: %d" % (path, isfile))
return isfile
@_fix_path
def isdir(self, path):
try:
isdir = (self.getinfo(path)['type'] == 'dirnode')
except errors.ResourceNotFoundError:
isdir = False
self._log(DEBUG, "Path %s is directory: %d" % (path, isdir))
return isdir
def listdir(self, *args, **kwargs):
return [ item[0] for item in self.listdirinfo(*args, **kwargs) ]
def listdirinfo(self, *args, **kwds):
return list(self.ilistdirinfo(*args,**kwds))
def ilistdir(self, *args, **kwds):
for item in self.ilistdirinfo(*args,**kwds):
yield item[0]
@_fix_path
def ilistdirinfo(self, path="/", wildcard=None, full=False, absolute=False,
dirs_only=False, files_only=False):
self._log(DEBUG, "Listing directory (listdirinfo) %s" % path)
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
for item in self.tahoeutil.list(self.dircap, path):
if dirs_only and item['type'] == 'filenode':
continue
elif files_only and item['type'] == 'dirnode':
continue
if wildcard is not None:
if isinstance(wildcard,basestring):
if not fnmatch.fnmatch(item['name'], wildcard):
continue
else:
if not wildcard(item['name']):
continue
if full:
item_path = relpath(pathjoin(path, item['name']))
elif absolute:
item_path = abspath(pathjoin(path, item['name']))
else:
item_path = item['name']
yield (item_path, item)
@_fix_path
def remove(self, path):
self._log(INFO, 'Removing file %s' % path)
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
if not self.isfile(path):
if not self.isdir(path):
raise errors.ResourceNotFoundError(path)
raise errors.ResourceInvalidError(path)
try:
self.tahoeutil.unlink(self.dircap, path)
except Exception, e:
raise errors.ResourceInvalidError(path)
@_fix_path
def removedir(self, path, recursive=False, force=False):
self._log(INFO, "Removing directory %s" % path)
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
if not self.isdir(path):
if not self.isfile(path):
raise errors.ResourceNotFoundError(path)
raise errors.ResourceInvalidError(path)
if not force and self.listdir(path):
raise errors.DirectoryNotEmptyError(path)
self.tahoeutil.unlink(self.dircap, path)
if recursive and path != '/':
try:
self.removedir(dirname(path), recursive=True)
except errors.DirectoryNotEmptyError:
pass
@_fix_path
def makedir(self, path, recursive=False, allow_recreate=False):
self._log(INFO, "Creating directory %s" % path)
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
if self.exists(path):
if not self.isdir(path):
raise errors.ResourceInvalidError(path)
if not allow_recreate:
raise errors.DestinationExistsError(path)
if not recursive and not self.exists(dirname(path)):
raise errors.ParentDirectoryMissingError(path)
self.tahoeutil.mkdir(self.dircap, path)
def movedir(self, src, dst, overwrite=False):
self.move(src, dst, overwrite=overwrite)
def move(self, src, dst, overwrite=False):
self._log(INFO, "Moving file from %s to %s" % (src, dst))
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
src = _fixpath(src)
dst = _fixpath(dst)
if not self.exists(dirname(dst)):
raise errors.ParentDirectoryMissingError(dst)
if not overwrite and self.exists(dst):
raise errors.DestinationExistsError(dst)
self.tahoeutil.move(self.dircap, src, dst)
def rename(self, src, dst):
self.move(src, dst)
def copy(self, src, dst, overwrite=False, chunk_size=16384):
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
# FIXME: this is out of date; how to do native tahoe copy?
        # FIXME: workaround because isfile() does not exist on _TahoeLAFS
FS.copy(self, src, dst, overwrite, chunk_size)
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
# FIXME: this is out of date; how to do native tahoe copy?
        # FIXME: workaround because isfile() does not exist on _TahoeLAFS
FS.copydir(self, src, dst, overwrite, ignore_errors, chunk_size)
def _log(self, level, message):
if not logger.isEnabledFor(level): return
logger.log(level, u'(%d) %s' % (id(self),
unicode(message).encode('ASCII', 'replace')))
@_fix_path
def getpathurl(self, path, allow_none=False, webapi=None):
'''
        Retrieve the URL at which the given file or directory is stored.
'''
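        # Illustrative: with the default webapi and a (made-up) dircap of
        # 'URI:DIR2:aaaa:bbbb', getpathurl('/foo.txt') returns something like
        # u'http://127.0.0.1:3456/uri/URI:DIR2:aaaa:bbbb/foo.txt'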
        if webapi is None:
webapi = self.connection.webapi
self._log(DEBUG, "Retrieving URL for %s over %s" % (path, webapi))
path = self.tahoeutil.fixwinpath(path, False)
return u"%s/uri/%s%s" % (webapi, self.dircap, path)
@_fix_path
def getrange(self, path, offset, length=None):
return self.connection.get(u'/uri/%s%s' % (self.dircap, path),
offset=offset, length=length)
@_fix_path
def setcontents(self, path, file, chunk_size=64*1024):
self._log(INFO, 'Uploading file %s' % path)
        size = None
if self.getmeta("read_only"):
raise errors.UnsupportedError('read only filesystem')
# Workaround for large files:
# First create zero file placeholder, then
# upload final content.
        if self.largefilesize is not None and getattr(file, 'read', None):
            # 'file' may also be a plain string, so duck-type check for a
            # read() method before treating it as a file-like object.
file.seek(0, SEEK_END)
size = file.tell()
file.seek(0)
if size > self.largefilesize:
self.connection.put(u'/uri/%s%s' % (self.dircap, path),
"PyFilesystem.TahoeLAFS: Upload started, final size %d" % size)
self.connection.put(u'/uri/%s%s' % (self.dircap, path), file, size=size)
@_fix_path
def getinfo(self, path):
self._log(INFO, 'Reading meta for %s' % path)
info = self.tahoeutil.info(self.dircap, path)
#import datetime
#info['created_time'] = datetime.datetime.now()
#info['modified_time'] = datetime.datetime.now()
#info['accessed_time'] = datetime.datetime.now()
        if info['type'] == 'filenode':
            info["st_mode"] = 0o700 | statinfo.S_IFREG
        elif info['type'] == 'dirnode':
            info["st_mode"] = 0o700 | statinfo.S_IFDIR
return info
class TahoeLAFS(CacheFSMixin,_TahoeLAFS):
"""FS providing cached access to a Tahoe Filesystem.
This class is the preferred means to access a Tahoe filesystem. It
maintains an internal cache of recently-accessed metadata to speed
up operations.
"""
def __init__(self, *args, **kwds):
kwds.setdefault("cache_timeout",60)
super(TahoeLAFS,self).__init__(*args,**kwds)
| bsd-3-clause | 7,316,195,165,693,776,000 | 35.281928 | 96 | 0.576525 | false |
kumarkrishna/sympy | sympy/printing/latex.py | 7 | 71602 | """
A Printer which converts an expression into its LaTeX equivalent.
"""
from __future__ import print_function, division
from sympy.core import S, Add, Symbol
from sympy.core.function import _coeff_isneg
from sympy.core.sympify import SympifyError
from sympy.core.alphabets import greeks
from sympy.core.operations import AssocOp
from sympy.logic.boolalg import true
## sympy.printing imports
from .printer import Printer
from .conventions import split_super_sub, requires_partial
from .precedence import precedence, PRECEDENCE
import mpmath.libmp as mlib
from mpmath.libmp import prec_to_dps
from sympy.core.compatibility import default_sort_key, range
from sympy.utilities.iterables import has_variety
import re
# Hand-picked functions which can be used directly in both LaTeX and MathJax
# Complete list at http://www.mathjax.org/docs/1.1/tex.html#supported-latex-commands
# This variable only contains those functions which sympy uses.
accepted_latex_functions = ['arcsin', 'arccos', 'arctan', 'sin', 'cos', 'tan',
'sinh', 'cosh', 'tanh', 'sqrt', 'ln', 'log', 'sec', 'csc',
'cot', 'coth', 're', 'im', 'frac', 'root', 'arg',
]
tex_greek_dictionary = {
'Alpha': 'A',
'Beta': 'B',
'Epsilon': 'E',
'Zeta': 'Z',
'Eta': 'H',
'Iota': 'I',
'Kappa': 'K',
'Mu': 'M',
'Nu': 'N',
'omicron': 'o',
'Omicron': 'O',
'Rho': 'P',
'Tau': 'T',
'Chi': 'X',
'lamda': r'\lambda',
'Lamda': r'\Lambda',
'khi': r'\chi',
'Khi': r'X',
'varepsilon': r'\varepsilon',
'varkappa': r'\varkappa',
'varphi': r'\varphi',
'varpi': r'\varpi',
'varrho': r'\varrho',
'varsigma': r'\varsigma',
'vartheta': r'\vartheta',
}
other_symbols = set(['aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth', 'hbar',
'hslash', 'mho', 'wp', ])
# Variable name modifiers
modifier_dict = {
# Accents
'mathring': lambda s: r'\mathring{'+s+r'}',
'ddddot': lambda s: r'\ddddot{'+s+r'}',
'dddot': lambda s: r'\dddot{'+s+r'}',
'ddot': lambda s: r'\ddot{'+s+r'}',
'dot': lambda s: r'\dot{'+s+r'}',
'check': lambda s: r'\check{'+s+r'}',
'breve': lambda s: r'\breve{'+s+r'}',
'acute': lambda s: r'\acute{'+s+r'}',
'grave': lambda s: r'\grave{'+s+r'}',
'tilde': lambda s: r'\tilde{'+s+r'}',
'hat': lambda s: r'\hat{'+s+r'}',
'bar': lambda s: r'\bar{'+s+r'}',
'vec': lambda s: r'\vec{'+s+r'}',
'prime': lambda s: "{"+s+"}'",
'prm': lambda s: "{"+s+"}'",
# Faces
'bold': lambda s: r'\boldsymbol{'+s+r'}',
'bm': lambda s: r'\boldsymbol{'+s+r'}',
'cal': lambda s: r'\mathcal{'+s+r'}',
'scr': lambda s: r'\mathscr{'+s+r'}',
'frak': lambda s: r'\mathfrak{'+s+r'}',
# Brackets
'norm': lambda s: r'\left\|{'+s+r'}\right\|',
'avg': lambda s: r'\left\langle{'+s+r'}\right\rangle',
'abs': lambda s: r'\left|{'+s+r'}\right|',
'mag': lambda s: r'\left|{'+s+r'}\right|',
}
greek_letters_set = frozenset(greeks)
class LatexPrinter(Printer):
printmethod = "_latex"
_default_settings = {
"order": None,
"mode": "plain",
"itex": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"long_frac_ratio": 2,
"mul_symbol": None,
"inv_trig_style": "abbreviated",
"mat_str": None,
"mat_delim": "[",
"symbol_names": {},
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
if 'mode' in self._settings:
valid_modes = ['inline', 'plain', 'equation',
'equation*']
if self._settings['mode'] not in valid_modes:
raise ValueError("'mode' must be one of 'inline', 'plain', "
"'equation' or 'equation*'")
if self._settings['fold_short_frac'] is None and \
self._settings['mode'] == 'inline':
self._settings['fold_short_frac'] = True
mul_symbol_table = {
None: r" ",
"ldot": r" \,.\, ",
"dot": r" \cdot ",
"times": r" \times "
}
self._settings['mul_symbol_latex'] = \
mul_symbol_table[self._settings['mul_symbol']]
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table[self._settings['mul_symbol'] or 'dot']
self._delim_dict = {'(': ')', '[': ']'}
def parenthesize(self, item, level):
if precedence(item) <= level:
return r"\left(%s\right)" % self._print(item)
else:
return self._print(item)
def doprint(self, expr):
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and (expr is not S.NegativeOne
and expr.is_Rational is False)))
def _needs_function_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _needs_mul_brackets(self, expr, first=False, last=False):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of a Mul, False otherwise. This is True for Add,
but also for some container objects that would not need brackets
when appearing last in a Mul, e.g. an Integral. ``last=True``
specifies that this expr is the last to appear in a Mul.
``first=True`` specifies that this expr is the first to appear in a Mul.
"""
from sympy import Integral, Piecewise, Product, Sum
if expr.is_Add:
return True
elif expr.is_Relational:
return True
elif expr.is_Mul:
if not first and _coeff_isneg(expr):
return True
if (not last and
any([expr.has(x) for x in (Integral, Piecewise, Product, Sum)])):
return True
return False
def _needs_add_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of an Add, False otherwise. This is False for most
things.
"""
if expr.is_Relational:
return True
return False
def _mul_is_clean(self, expr):
for arg in expr.args:
if arg.is_Function:
return False
return True
def _pow_is_clean(self, expr):
return not self._needs_brackets(expr.base)
def _do_exponent(self, expr, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
def _print_bool(self, e):
return r"\mathrm{%s}" % e
_print_BooleanTrue = _print_bool
_print_BooleanFalse = _print_bool
def _print_NoneType(self, e):
return r"\mathrm{%s}" % e
def _print_Add(self, expr, order=None):
if self.order == 'none':
terms = list(expr.args)
else:
terms = self._as_ordered_terms(expr, order=order)
tex = ""
for i, term in enumerate(terms):
if i == 0:
pass
elif _coeff_isneg(term):
tex += " - "
term = -term
else:
tex += " + "
term_tex = self._print(term)
if self._needs_add_brackets(term):
term_tex = r"\left(%s\right)" % term_tex
tex += term_tex
return tex
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
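        # e.g. with the default "dot" number separator, Float(2.5e20)
        # renders as "2.5 \cdot 10^{20}"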
separator = self._settings['mul_symbol_latex_numbers']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \infty"
else:
return str_real
def _print_Mul(self, expr):
if _coeff_isneg(expr):
expr = -expr
tex = "- "
else:
tex = ""
from sympy.simplify import fraction
numer, denom = fraction(expr, exact=True)
separator = self._settings['mul_symbol_latex']
numbersep = self._settings['mul_symbol_latex_numbers']
def convert(expr):
if not expr.is_Mul:
return str(self._print(expr))
else:
_tex = last_term_tex = ""
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
args = expr.args
for i, term in enumerate(args):
term_tex = self._print(term)
if self._needs_mul_brackets(term, first=(i == 0),
last=(i == len(args) - 1)):
term_tex = r"\left(%s\right)" % term_tex
if re.search("[0-9][} ]*$", last_term_tex) and \
re.match("[{ ]*[-+0-9]", term_tex):
# between two numbers
_tex += numbersep
elif _tex:
_tex += separator
_tex += term_tex
last_term_tex = term_tex
return _tex
if denom is S.One:
# use the original expression here, since fraction() may have
# altered it when producing numer and denom
tex += convert(expr)
else:
snumer = convert(numer)
sdenom = convert(denom)
ldenom = len(sdenom.split())
ratio = self._settings['long_frac_ratio']
if self._settings['fold_short_frac'] \
and ldenom <= 2 and not "^" in sdenom:
# handle short fractions
if self._needs_mul_brackets(numer, last=False):
tex += r"\left(%s\right) / %s" % (snumer, sdenom)
else:
tex += r"%s / %s" % (snumer, sdenom)
elif len(snumer.split()) > ratio*ldenom:
# handle long fractions
if self._needs_mul_brackets(numer, last=True):
tex += r"\frac{1}{%s}%s\left(%s\right)" \
% (sdenom, separator, snumer)
elif numer.is_Mul:
# split a long numerator
a = S.One
b = S.One
for x in numer.args:
if self._needs_mul_brackets(x, last=False) or \
len(convert(a*x).split()) > ratio*ldenom or \
(b.is_commutative is x.is_commutative is False):
b *= x
else:
a *= x
if self._needs_mul_brackets(b, last=True):
tex += r"\frac{%s}{%s}%s\left(%s\right)" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{%s}{%s}%s%s" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer)
else:
tex += r"\frac{%s}{%s}" % (snumer, sdenom)
return tex
def _print_Pow(self, expr):
# Treat x**Rational(1,n) as special case
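        # e.g. x**Rational(1, 3) renders as "\sqrt[3]{x}" and
        # x**Rational(-1, 2) as "\frac{1}{\sqrt{x}}"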
if expr.exp.is_Rational and abs(expr.exp.p) == 1 and expr.exp.q != 1:
base = self._print(expr.base)
expq = expr.exp.q
if expq == 2:
tex = r"\sqrt{%s}" % base
elif self._settings['itex']:
tex = r"\root{%d}{%s}" % (expq, base)
else:
tex = r"\sqrt[%d]{%s}" % (expq, base)
if expr.exp.is_negative:
return r"\frac{1}{%s}" % tex
else:
return tex
elif self._settings['fold_frac_powers'] \
and expr.exp.is_Rational \
and expr.exp.q != 1:
base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q
if expr.base.is_Function:
return self._print(expr.base, "%s/%s" % (p, q))
if self._needs_brackets(expr.base):
return r"\left(%s\right)^{%s/%s}" % (base, p, q)
return r"%s^{%s/%s}" % (base, p, q)
elif expr.exp.is_Rational and expr.exp.is_negative and expr.base.is_commutative:
# Things like 1/x
return self._print_Mul(expr)
else:
if expr.base.is_Function:
return self._print(expr.base, self._print(expr.exp))
else:
if expr.is_commutative and expr.exp == -1:
#solves issue 4129
#As Mul always simplify 1/x to x**-1
#The objective is achieved with this hack
#first we get the latex for -1 * expr,
#which is a Mul expression
tex = self._print(S.NegativeOne * expr).strip()
#the result comes with a minus and a space, so we remove
if tex[:1] == "-":
return tex[1:].strip()
if self._needs_brackets(expr.base):
tex = r"\left(%s\right)^{%s}"
else:
tex = r"%s^{%s}"
return tex % (self._print(expr.base),
self._print(expr.exp))
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_BasisDependent(self, expr):
from sympy.vector import Vector
o1 = []
if expr == expr.zero:
return expr.zero._latex_form
if isinstance(expr, Vector):
items = expr.separate().items()
else:
items = [(0, expr)]
for system, vect in items:
inneritems = list(vect.components.items())
            inneritems.sort(key=lambda x: x[0].__str__())
for k, v in inneritems:
if v == 1:
o1.append(' + ' + k._latex_form)
elif v == -1:
o1.append(' - ' + k._latex_form)
else:
arg_str = '(' + LatexPrinter().doprint(v) + ')'
o1.append(' + ' + arg_str + k._latex_form)
outstr = (''.join(o1))
if outstr[1] != '-':
outstr = outstr[3:]
else:
outstr = outstr[1:]
return outstr
def _print_Indexed(self, expr):
tex = self._print(expr.base)+'_{%s}' % ','.join(
map(self._print, expr.indices))
return tex
def _print_IndexedBase(self, expr):
return self._print(expr.label)
def _print_Derivative(self, expr):
dim = len(expr.variables)
if requires_partial(expr):
diff_symbol = r'\partial'
else:
diff_symbol = r'd'
if dim == 1:
tex = r"\frac{%s}{%s %s}" % (diff_symbol, diff_symbol,
self._print(expr.variables[0]))
else:
multiplicity, i, tex = [], 1, ""
current = expr.variables[0]
for symbol in expr.variables[1:]:
if symbol == current:
i = i + 1
else:
multiplicity.append((current, i))
current, i = symbol, 1
else:
multiplicity.append((current, i))
for x, i in multiplicity:
if i == 1:
tex += r"%s %s" % (diff_symbol, self._print(x))
else:
tex += r"%s %s^{%s}" % (diff_symbol, self._print(x), i)
tex = r"\frac{%s^{%s}}{%s} " % (diff_symbol, dim, tex)
if isinstance(expr.expr, AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(expr.expr))
else:
return r"%s %s" % (tex, self._print(expr.expr))
def _print_Subs(self, subs):
expr, old, new = subs.args
latex_expr = self._print(expr)
latex_old = (self._print(e) for e in old)
latex_new = (self._print(e) for e in new)
latex_subs = r'\\ '.join(
e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr, latex_subs)
def _print_Integral(self, expr):
tex, symbols = "", []
# Only up to \iiiint exists
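        # e.g. Integral(x*y*z, x, y, z) renders as
        # "\iiint x y z\, dx\, dy\, dz"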
if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
# Use len(expr.limits)-1 so that syntax highlighters don't think
# \" is an escaped quote
tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
symbols = [r"\, d%s" % self._print(symbol[0])
for symbol in expr.limits]
else:
for lim in reversed(expr.limits):
symbol = lim[0]
tex += r"\int"
if len(lim) > 1:
if self._settings['mode'] in ['equation', 'equation*'] \
and not self._settings['itex']:
tex += r"\limits"
if len(lim) == 3:
tex += "_{%s}^{%s}" % (self._print(lim[1]),
self._print(lim[2]))
if len(lim) == 2:
tex += "^{%s}" % (self._print(lim[1]))
symbols.insert(0, r"\, d%s" % self._print(symbol))
return r"%s %s%s" % (tex,
str(self._print(expr.function)), "".join(symbols))
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
tex = r"\lim_{%s \to " % self._print(z)
if z0 in (S.Infinity, S.NegativeInfinity):
tex += r"%s}" % self._print(z0)
else:
tex += r"%s^%s}" % (self._print(z0), self._print(dir))
if isinstance(e, AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(e))
else:
return r"%s %s" % (tex, self._print(e))
def _hprint_Function(self, func):
'''
Logic to decide how to render a function to latex
- if it is a recognized latex name, use the appropriate latex command
- if it is a single letter, just use that letter
        - if it is a longer name, then put \operatorname{} around it and be
          mindful of underscores in the name
'''
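        # For illustration, with default settings:
        #   'sin'  -> r'\sin'                  (recognized LaTeX function)
        #   'f'    -> 'f'                      (single letter)
        #   'gunk' -> r'\operatorname{gunk}'   (longer, unknown name)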
func = self._deal_with_super_sub(func)
if func in accepted_latex_functions:
name = r"\%s" % func
elif len(func) == 1 or func.startswith('\\'):
name = func
else:
name = r"\operatorname{%s}" % func
return name
def _print_Function(self, expr, exp=None):
'''
Render functions to LaTeX, handling functions that LaTeX knows about
e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
For single-letter function names, render them as regular LaTeX math
symbols. For multi-letter function names that LaTeX does not know
        about (e.g., Li, sech), use \operatorname{} so that the function name
is rendered in Roman font and LaTeX handles spacing properly.
expr is the expression involving the function
exp is an exponent
'''
func = expr.func.__name__
if hasattr(self, '_print_' + func):
return getattr(self, '_print_' + func)(expr, exp)
else:
args = [ str(self._print(arg)) for arg in expr.args ]
# How inverse trig functions should be displayed, formats are:
# abbreviated: asin, full: arcsin, power: sin^-1
inv_trig_style = self._settings['inv_trig_style']
# If we are dealing with a power-style inverse trig function
inv_trig_power_case = False
# If it is applicable to fold the argument brackets
can_fold_brackets = self._settings['fold_func_brackets'] and \
len(args) == 1 and \
not self._needs_function_brackets(expr.args[0])
inv_trig_table = ["asin", "acos", "atan", "acot"]
# If the function is an inverse trig function, handle the style
if func in inv_trig_table:
if inv_trig_style == "abbreviated":
func = func
elif inv_trig_style == "full":
func = "arc" + func[1:]
elif inv_trig_style == "power":
func = func[1:]
inv_trig_power_case = True
# Can never fold brackets if we're raised to a power
if exp is not None:
can_fold_brackets = False
if inv_trig_power_case:
if func in accepted_latex_functions:
name = r"\%s^{-1}" % func
else:
name = r"\operatorname{%s}^{-1}" % func
elif exp is not None:
name = r'%s^{%s}' % (self._hprint_Function(func), exp)
else:
name = self._hprint_Function(func)
if can_fold_brackets:
if func in accepted_latex_functions:
# Wrap argument safely to avoid parse-time conflicts
# with the function name itself
name += r" {%s}"
else:
name += r"%s"
else:
name += r"{\left (%s \right )}"
if inv_trig_power_case and exp is not None:
name += r"^{%s}" % exp
return name % ",".join(args)
def _print_UndefinedFunction(self, expr):
return self._hprint_Function(str(expr))
def _print_FunctionClass(self, expr):
if hasattr(expr, '_latex_no_arg'):
return expr._latex_no_arg(self)
return self._hprint_Function(str(expr))
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
        tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr))
return tex
def _print_Min(self, expr, exp=None):
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\min\left(%s\right)" % ", ".join(texargs)
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Max(self, expr, exp=None):
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\max\left(%s\right)" % ", ".join(texargs)
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_floor(self, expr, exp=None):
tex = r"\lfloor{%s}\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\lceil{%s}\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\left|{%s}\right|" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Determinant = _print_Abs
def _print_re(self, expr, exp=None):
if self._needs_brackets(expr.args[0]):
tex = r"\Re {\left (%s \right )}" % self._print(expr.args[0])
else:
tex = r"\Re{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_im(self, expr, exp=None):
if self._needs_brackets(expr.args[0]):
tex = r"\Im {\left ( %s \right )}" % self._print(expr.args[0])
else:
tex = r"\Im{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_Not(self, e):
from sympy import Equivalent, Implies
if isinstance(e.args[0], Equivalent):
return self._print_Equivalent(e.args[0], r"\not\equiv")
if isinstance(e.args[0], Implies):
return self._print_Implies(e.args[0], r"\not\Rightarrow")
if (e.args[0].is_Boolean):
return r"\neg (%s)" % self._print(e.args[0])
else:
return r"\neg %s" % self._print(e.args[0])
def _print_LogOp(self, args, char):
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" %s \left(%s\right)" % (char, self._print(arg))
else:
tex += r" %s %s" % (char, self._print(arg))
return tex
def _print_And(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\wedge")
def _print_Or(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\vee")
def _print_Xor(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\veebar")
def _print_Implies(self, e, altchar=None):
return self._print_LogOp(e.args, altchar or r"\Rightarrow")
def _print_Equivalent(self, e, altchar=None):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, altchar or r"\equiv")
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_polar_lift(self, expr, exp=None):
func = r"\operatorname{polar\_lift}"
arg = r"{\left (%s \right )}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (func, exp, arg)
else:
return r"%s%s" % (func, arg)
def _print_ExpBase(self, expr, exp=None):
# TODO should exp_polar be printed differently?
# what about exp_polar(0), exp_polar(1)?
tex = r"e^{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_elliptic_k(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"K^{%s}%s" % (exp, tex)
else:
return r"K%s" % tex
def _print_elliptic_f(self, expr, exp=None):
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"F^{%s}%s" % (exp, tex)
else:
return r"F%s" % tex
def _print_elliptic_e(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"E^{%s}%s" % (exp, tex)
else:
return r"E%s" % tex
def _print_elliptic_pi(self, expr, exp=None):
if len(expr.args) == 3:
tex = r"\left(%s; %s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]), \
self._print(expr.args[2]))
else:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"\Pi^{%s}%s" % (exp, tex)
else:
return r"\Pi%s" % tex
def _print_gamma(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_uppergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_lowergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\gamma^{%s}%s" % (exp, tex)
else:
return r"\gamma%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
def _print_fresnels(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"S^{%s}%s" % (exp, tex)
else:
return r"S%s" % tex
def _print_fresnelc(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"C^{%s}%s" % (exp, tex)
else:
return r"C%s" % tex
def _print_subfactorial(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"!\left(%s\right)" % self._print(x)
else:
tex = "!" + self._print(x)
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!" % self._print(x)
else:
tex = self._print(x) + "!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial2(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!!" % self._print(x)
else:
tex = self._print(x) + "!!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
n, k = expr.args
if self._needs_brackets(n):
base = r"\left(%s\right)" % self._print(n)
else:
base = self._print(n)
tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
n, k = expr.args
if self._needs_brackets(k):
sub = r"\left(%s\right)" % self._print(k)
else:
sub = self._print(k)
tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub)
return self._do_exponent(tex, exp)
def _hprint_BesselBase(self, expr, exp, sym):
tex = r"%s" % (sym)
need_exp = False
if exp is not None:
if tex.find('^') == -1:
tex = r"%s^{%s}" % (tex, self._print(exp))
else:
need_exp = True
tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
self._print(expr.argument))
if need_exp:
tex = self._do_exponent(tex, exp)
return tex
def _hprint_vec(self, vec):
if len(vec) == 0:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
def _print_besselj(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'J')
def _print_besseli(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'I')
def _print_besselk(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'K')
def _print_bessely(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'Y')
def _print_yn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'y')
def _print_jn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'j')
def _print_hankel1(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
def _print_hankel2(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
def _hprint_airy(self, expr, exp=None, notation=""):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (notation, exp, tex)
else:
return r"%s%s" % (notation, tex)
def _hprint_airy_prime(self, expr, exp=None, notation=""):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"{%s^\prime}^{%s}%s" % (notation, exp, tex)
else:
return r"%s^\prime%s" % (notation, tex)
def _print_airyai(self, expr, exp=None):
return self._hprint_airy(expr, exp, 'Ai')
def _print_airybi(self, expr, exp=None):
return self._hprint_airy(expr, exp, 'Bi')
def _print_airyaiprime(self, expr, exp=None):
return self._hprint_airy_prime(expr, exp, 'Ai')
def _print_airybiprime(self, expr, exp=None):
return self._hprint_airy_prime(expr, exp, 'Bi')
def _print_hyper(self, expr, exp=None):
tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \
r"\middle| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, self._print(exp))
return tex
def _print_meijerg(self, expr, exp=None):
tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \
r"%s & %s \end{matrix} \middle| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._print(len(expr.bm)), self._print(len(expr.an)),
self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, self._print(exp))
return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (self._print(exp), tex)
return r"\eta%s" % tex
def _print_zeta(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\zeta^{%s}%s" % (self._print(exp), tex)
return r"\zeta%s" % tex
def _print_lerchphi(self, expr, exp=None):
tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
if exp is None:
return r"\Phi%s" % tex
return r"\Phi^{%s}%s" % (self._print(exp), tex)
def _print_polylog(self, expr, exp=None):
s, z = map(self._print, expr.args)
tex = r"\left(%s\right)" % z
if exp is None:
return r"\operatorname{Li}_{%s}%s" % (s, tex)
return r"\operatorname{Li}_{%s}^{%s}%s" % (s, self._print(exp), tex)
def _print_jacobi(self, expr, exp=None):
n, a, b, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_gegenbauer(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_chebyshevt(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"T_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_chebyshevu(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"U_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_legendre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"P_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_assoc_legendre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_hermite(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"H_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_laguerre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"L_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_assoc_laguerre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_Ynm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_Znm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_Rational(self, expr):
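        # e.g. Rational(-3, 4) -> "- \frac{3}{4}"; Rational(5) -> "5"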
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
def _print_Order(self, expr):
s = self._print(expr.expr)
if expr.point and any(p != S.Zero for p in expr.point) or \
len(expr.variables) > 1:
s += '; '
if len(expr.variables) > 1:
s += self._print(expr.variables)
elif len(expr.variables):
s += self._print(expr.variables[0])
s += r'\rightarrow'
if len(expr.point) > 1:
s += self._print(expr.point)
else:
s += self._print(expr.point[0])
return r"\mathcal{O}\left(%s\right)" % s
def _print_Symbol(self, expr):
if expr in self._settings['symbol_names']:
return self._settings['symbol_names'][expr]
return self._deal_with_super_sub(expr.name) if \
'\\' not in expr.name else expr.name
_print_RandomSymbol = _print_Symbol
_print_MatrixSymbol = _print_Symbol
def _deal_with_super_sub(self, string):
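        # e.g. 'omega_2' -> r'\omega_{2}', 'x_i' -> 'x_{i}'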
name, supers, subs = split_super_sub(string)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
# glue all items together:
if len(supers) > 0:
name += "^{%s}" % " ".join(supers)
if len(subs) > 0:
name += "_{%s}" % " ".join(subs)
return name
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==": "=",
">": gt,
"<": lt,
">=": r"\geq",
"<=": r"\leq",
"!=": r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
def _print_Piecewise(self, expr):
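        # e.g. Piecewise((x, x > 0), (0, True)) renders roughly as
        # r"\begin{cases} x & \text{for}\: x > 0 \\0 & \text{otherwise} \end{cases}"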
ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c))
for e, c in expr.args[:-1]]
if expr.args[-1].cond == true:
ecpairs.append(r"%s & \text{otherwise}" %
self._print(expr.args[-1].expr))
else:
ecpairs.append(r"%s & \text{for}\: %s" %
(self._print(expr.args[-1].expr),
self._print(expr.args[-1].cond)))
tex = r"\begin{cases} %s \end{cases}"
return tex % r" \\".join(ecpairs)
def _print_MatrixBase(self, expr):
lines = []
for line in range(expr.rows): # horrible, should be 'rows'
lines.append(" & ".join([ self._print(i) for i in expr[line, :] ]))
mat_str = self._settings['mat_str']
if mat_str is None:
if self._settings['mode'] == 'inline':
mat_str = 'smallmatrix'
else:
if (expr.cols <= 10) is True:
mat_str = 'matrix'
else:
mat_str = 'array'
out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
out_str = out_str.replace('%MATSTR%', mat_str)
if mat_str == 'array':
out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s')
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
out_str = r'\left' + left_delim + out_str + \
r'\right' + right_delim
return out_str % r"\\".join(lines)
_print_ImmutableMatrix = _print_MatrixBase
_print_Matrix = _print_MatrixBase
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '_{%s, %s}'%(expr.i, expr.j)
def _print_MatrixSlice(self, expr):
def latexslice(x):
x = list(x)
if x[2] == 1:
del x[2]
if x[1] == x[0] + 1:
del x[1]
if x[0] == 0:
x[0] = ''
return ':'.join(map(self._print, x))
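        # e.g. for X[0:3, 1] this yields "X\left[:3, 1\right]"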
return (self._print(expr.parent) + r'\left[' +
latexslice(expr.rowslice) + ', ' +
latexslice(expr.colslice) + r'\right]')
def _print_BlockMatrix(self, expr):
return self._print(expr.blocks)
def _print_Transpose(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol
if not isinstance(mat, MatrixSymbol):
return r"\left(%s\right)^T" % self._print(mat)
else:
return "%s^T" % self._print(mat)
def _print_Adjoint(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol
if not isinstance(mat, MatrixSymbol):
return r"\left(%s\right)^\dag" % self._print(mat)
else:
return "%s^\dag" % self._print(mat)
def _print_MatAdd(self, expr):
terms = list(expr.args)
tex = " + ".join(map(self._print, terms))
return tex
def _print_MatMul(self, expr):
from sympy import Add, MatAdd, HadamardProduct
def parens(x):
if isinstance(x, (Add, MatAdd, HadamardProduct)):
return r"\left(%s\right)" % self._print(x)
return self._print(x)
return ' '.join(map(parens, expr.args))
def _print_HadamardProduct(self, expr):
from sympy import Add, MatAdd, MatMul
def parens(x):
if isinstance(x, (Add, MatAdd, MatMul)):
return r"\left(%s\right)" % self._print(x)
return self._print(x)
return ' \circ '.join(map(parens, expr.args))
def _print_MatPow(self, expr):
base, exp = expr.base, expr.exp
from sympy.matrices import MatrixSymbol
if not isinstance(base, MatrixSymbol):
return r"\left(%s\right)^{%s}" % (self._print(base), self._print(exp))
else:
return "%s^{%s}" % (self._print(base), self._print(exp))
def _print_ZeroMatrix(self, Z):
return r"\mathbb{0}"
def _print_Identity(self, I):
return r"\mathbb{I}"
def _print_tuple(self, expr):
return r"\left ( %s\right )" % \
r", \quad ".join([ self._print(i) for i in expr ])
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_list(self, expr):
return r"\left [ %s\right ]" % \
r", \quad ".join([ self._print(i) for i in expr ])
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
val = d[key]
items.append("%s : %s" % (self._print(key), self._print(val)))
return r"\left \{ %s\right \}" % r", \quad ".join(items)
def _print_Dict(self, expr):
return self._print_dict(expr)
def _print_DiracDelta(self, expr, exp=None):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (
self._print(expr.args[1]), self._print(expr.args[0]))
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_Heaviside(self, expr, exp=None):
tex = r"\theta\left(%s\right)" % self._print(expr.args[0])
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_KroneckerDelta(self, expr, exp=None):
i = self._print(expr.args[0])
j = self._print(expr.args[1])
if expr.args[0].is_Atom and expr.args[1].is_Atom:
tex = r'\delta_{%s %s}' % (i, j)
else:
tex = r'\delta_{%s, %s}' % (i, j)
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_LeviCivita(self, expr, exp=None):
indices = map(self._print, expr.args)
if all(x.is_Atom for x in expr.args):
tex = r'\varepsilon_{%s}' % " ".join(indices)
else:
tex = r'\varepsilon_{%s}' % ", ".join(indices)
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_ProductSet(self, p):
if len(p.sets) > 1 and not has_variety(p.sets):
return self._print(p.sets[0]) + "^%d" % len(p.sets)
else:
return r" \times ".join(self._print(set) for set in p.sets)
def _print_RandomDomain(self, d):
try:
return 'Domain: ' + self._print(d.as_boolean())
except Exception:
try:
return ('Domain: ' + self._print(d.symbols) + ' in ' +
self._print(d.set))
            except Exception:
return 'Domain on ' + self._print(d.symbols)
def _print_FiniteSet(self, s):
items = sorted(s.args, key=default_sort_key)
return self._print_set(items)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
items = ", ".join(map(self._print, items))
return r"\left\{%s\right\}" % items
_print_frozenset = _print_set
def _print_Range(self, s):
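        # e.g. Range(0, 10) renders as r"\left\{0, 1, \ldots, 9\right\}"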
if len(s) > 4:
it = iter(s)
printset = next(it), next(it), '\ldots', s._last_element
else:
printset = tuple(s)
return (r"\left\{"
+ r", ".join(self._print(el) for el in printset)
+ r"\right\}")
def _print_SeqFormula(self, s):
if s.start is S.NegativeInfinity:
stop = s.stop
printset = ('\ldots', s.coeff(stop - 3), s.coeff(stop - 2),
s.coeff(stop - 1), s.coeff(stop))
elif s.stop is S.Infinity or s.length > 4:
printset = s[:4]
printset.append('\ldots')
else:
printset = tuple(s)
return (r"\left\["
+ r", ".join(self._print(el) for el in printset)
+ r"\right\]")
_print_SeqPer = _print_SeqFormula
_print_SeqAdd = _print_SeqFormula
_print_SeqMul = _print_SeqFormula
def _print_Interval(self, i):
if i.start == i.end:
return r"\left\{%s\right\}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
def _print_Union(self, u):
return r" \cup ".join([self._print(i) for i in u.args])
def _print_Complement(self, u):
return r" \setminus ".join([self._print(i) for i in u.args])
def _print_Intersection(self, u):
return r" \cap ".join([self._print(i) for i in u.args])
def _print_SymmetricDifference(self, u):
return r" \triangle ".join([self._print(i) for i in u.args])
def _print_EmptySet(self, e):
return r"\emptyset"
def _print_Naturals(self, n):
return r"\mathbb{N}"
def _print_Integers(self, i):
return r"\mathbb{Z}"
def _print_Reals(self, i):
return r"\mathbb{R}"
def _print_Complexes(self, i):
return r"\mathbb{C}"
def _print_ImageSet(self, s):
return r"\left\{%s\; |\; %s \in %s\right\}" % (
self._print(s.lamda.expr),
', '.join([self._print(var) for var in s.lamda.variables]),
self._print(s.base_set))
def _print_ConditionSet(self, s):
vars_print = ', '.join([self._print(var) for var in s.condition.variables])
return r"\left\{%s\; |\; %s \in %s \wedge %s \right\}" % (
vars_print,
vars_print,
self._print(s.base_set),
self._print(s.condition.expr))
def _print_Contains(self, e):
return r"%s \in %s" % tuple(self._print(a) for a in e.args)
def _print_FourierSeries(self, s):
return self._print_Add(s.truncate()) + self._print(' + \ldots')
def _print_FormalPowerSeries(self, s):
return self._print_Add(s.truncate())
def _print_FiniteField(self, expr):
return r"\mathbb{F}_{%s}" % expr.mod
def _print_IntegerRing(self, expr):
return r"\mathbb{Z}"
def _print_RationalField(self, expr):
return r"\mathbb{Q}"
def _print_RealField(self, expr):
return r"\mathbb{R}"
def _print_ComplexField(self, expr):
return r"\mathbb{C}"
def _print_PolynomialRing(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
return r"%s\left[%s\right]" % (domain, symbols)
def _print_FractionField(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
return r"%s\left(%s\right)" % (domain, symbols)
def _print_PolynomialRingBase(self, expr):
domain = self._print(expr.domain)
symbols = ", ".join(map(self._print, expr.symbols))
inv = ""
if not expr.is_Poly:
inv = r"S_<^{-1}"
return r"%s%s\left[%s\right]" % (inv, domain, symbols)
def _print_Poly(self, poly):
cls = poly.__class__.__name__
expr = self._print(poly.as_expr())
gens = list(map(self._print, poly.gens))
domain = "domain=%s" % self._print(poly.get_domain())
args = ", ".join([expr] + gens + [domain])
if cls in accepted_latex_functions:
tex = r"\%s {\left (%s \right )}" % (cls, args)
else:
tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args)
return tex
def _print_RootOf(self, root):
cls = root.__class__.__name__
expr = self._print(root.expr)
index = root.index
if cls in accepted_latex_functions:
return r"\%s {\left(%s, %d\right)}" % (cls, expr, index)
else:
return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr, index)
def _print_RootSum(self, expr):
cls = expr.__class__.__name__
args = [self._print(expr.expr)]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
if cls in accepted_latex_functions:
return r"\%s {\left(%s\right)}" % (cls, ", ".join(args))
else:
return r"\operatorname{%s} {\left(%s\right)}" % (cls, ", ".join(args))
def _print_PolyElement(self, poly):
mul_symbol = self._settings['mul_symbol_latex']
return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol)
def _print_FracElement(self, frac):
if frac.denom == 1:
return self._print(frac.numer)
else:
numer = self._print(frac.numer)
denom = self._print(frac.denom)
return r"\frac{%s}{%s}" % (numer, denom)
def _print_euler(self, expr):
return r"E_{%s}" % self._print(expr.args[0])
def _print_catalan(self, expr):
return r"C_{%s}" % self._print(expr.args[0])
def _print_MellinTransform(self, expr):
return r"\mathcal{M}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseMellinTransform(self, expr):
return r"\mathcal{M}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_LaplaceTransform(self, expr):
return r"\mathcal{L}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseLaplaceTransform(self, expr):
return r"\mathcal{L}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_FourierTransform(self, expr):
return r"\mathcal{F}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseFourierTransform(self, expr):
return r"\mathcal{F}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_SineTransform(self, expr):
return r"\mathcal{SIN}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseSineTransform(self, expr):
return r"\mathcal{SIN}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_CosineTransform(self, expr):
return r"\mathcal{COS}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseCosineTransform(self, expr):
return r"\mathcal{COS}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_DMP(self, p):
try:
if p.ring is not None:
# TODO incorporate order
return self._print(p.ring.to_sympy(p))
except SympifyError:
pass
return self._print(repr(p))
def _print_DMF(self, p):
return self._print_DMP(p)
def _print_Object(self, object):
return self._print(Symbol(object.name))
def _print_Morphism(self, morphism):
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
return "%s\\rightarrow %s" % (domain, codomain)
def _print_NamedMorphism(self, morphism):
pretty_name = self._print(Symbol(morphism.name))
pretty_morphism = self._print_Morphism(morphism)
return "%s:%s" % (pretty_name, pretty_morphism)
def _print_IdentityMorphism(self, morphism):
from sympy.categories import NamedMorphism
return self._print_NamedMorphism(NamedMorphism(
morphism.domain, morphism.codomain, "id"))
def _print_CompositeMorphism(self, morphism):
# All components of the morphism have names and it is thus
# possible to build the name of the composite.
component_names_list = [self._print(Symbol(component.name)) for
component in morphism.components]
component_names_list.reverse()
component_names = "\\circ ".join(component_names_list) + ":"
pretty_morphism = self._print_Morphism(morphism)
return component_names + pretty_morphism
def _print_Category(self, morphism):
return "\\mathbf{%s}" % self._print(Symbol(morphism.name))
def _print_Diagram(self, diagram):
if not diagram.premises:
# This is an empty diagram.
return self._print(S.EmptySet)
latex_result = self._print(diagram.premises)
if diagram.conclusions:
latex_result += "\\Longrightarrow %s" % \
self._print(diagram.conclusions)
return latex_result
def _print_DiagramGrid(self, grid):
latex_result = "\\begin{array}{%s}\n" % ("c" * grid.width)
for i in range(grid.height):
for j in range(grid.width):
if grid[i, j]:
latex_result += latex(grid[i, j])
latex_result += " "
if j != grid.width - 1:
latex_result += "& "
if i != grid.height - 1:
latex_result += "\\\\"
latex_result += "\n"
latex_result += "\\end{array}\n"
return latex_result
def _print_FreeModule(self, M):
return '{%s}^{%s}' % (self._print(M.ring), self._print(M.rank))
def _print_FreeModuleElement(self, m):
# Print as row vector for convenience, for now.
return r"\left[ %s \right]" % ",".join(
'{' + self._print(x) + '}' for x in m)
def _print_SubModule(self, m):
return r"\left< %s \right>" % ",".join(
'{' + self._print(x) + '}' for x in m.gens)
def _print_ModuleImplementedIdeal(self, m):
return r"\left< %s \right>" % ",".join(
'{' + self._print(x) + '}' for [x] in m._module.gens)
def _print_QuotientRing(self, R):
# TODO nicer fractions for few generators...
return r"\frac{%s}{%s}" % (self._print(R.ring), self._print(R.base_ideal))
def _print_QuotientRingElement(self, x):
return r"{%s} + {%s}" % (self._print(x.data), self._print(x.ring.base_ideal))
def _print_QuotientModuleElement(self, m):
return r"{%s} + {%s}" % (self._print(m.data),
self._print(m.module.killed_module))
def _print_QuotientModule(self, M):
# TODO nicer fractions for few generators...
return r"\frac{%s}{%s}" % (self._print(M.base),
self._print(M.killed_module))
def _print_MatrixHomomorphism(self, h):
return r"{%s} : {%s} \to {%s}" % (self._print(h._sympy_matrix()),
self._print(h.domain), self._print(h.codomain))
def _print_BaseScalarField(self, field):
string = field._coord_sys._names[field._index]
return r'\boldsymbol{\mathrm{%s}}' % self._print(Symbol(string))
def _print_BaseVectorField(self, field):
string = field._coord_sys._names[field._index]
return r'\partial_{%s}' % self._print(Symbol(string))
def _print_Differential(self, diff):
field = diff._form_field
if hasattr(field, '_coord_sys'):
string = field._coord_sys._names[field._index]
return r'\mathrm{d}%s' % self._print(Symbol(string))
else:
return 'd(%s)' % self._print(field)
def _print_Tr(self, p):
#Todo: Handle indices
contents = self._print(p.args[0])
return r'\mbox{Tr}\left(%s\right)' % (contents)
def _print_totient(self, expr):
return r'\phi\left( %s \right)' % self._print(expr.args[0])
def _print_divisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^{%s}%s" % (self._print(exp), tex)
return r"\sigma%s" % tex
def _print_udivisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^*^{%s}%s" % (self._print(exp), tex)
return r"\sigma^*%s" % tex
def translate(s):
r'''
Check for a modifier ending the string. If present, convert the
modifier to latex and translate the rest recursively.
Given a description of a Greek letter or other special character,
return the appropriate latex.
Let everything else pass as given.
>>> from sympy.printing.latex import translate
>>> translate('alphahatdotprime')
"{\\dot{\\hat{\\alpha}}}'"
'''
# Process the rest
tex = tex_greek_dictionary.get(s)
if tex:
return tex
elif s.lower() in greek_letters_set or s in other_symbols:
return "\\" + s
else:
# Process modifiers, if any, and recurse
for key in sorted(modifier_dict.keys(), key=lambda k:len(k), reverse=True):
if s.lower().endswith(key) and len(s)>len(key):
return modifier_dict[key](translate(s[:-len(key)]))
return s
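# Illustrative behaviour of translate() (hedged examples, not doctests):
#   translate('alpha') -> '\\alpha'   (greek-letter lookup)
#   translate('xdot')  -> '\\dot{x}'  (modifier recursion on the trailing 'dot')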
def latex(expr, **settings):
r"""
Convert the given expression to LaTeX representation.
>>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational
>>> from sympy.abc import x, y, mu, r, tau
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
order: Any of the supported monomial orderings (currently "lex", "grlex", or
"grevlex"), "old", and "none". This parameter does nothing for Mul objects.
Setting order to "old" uses the compatibility ordering for Add defined in
Printer. For very large expressions, set the 'order' keyword to 'none' if
speed is a concern.
mode: Specifies how the generated code will be delimited. 'mode' can be one
of 'plain', 'inline', 'equation' or 'equation*'. If 'mode' is set to
'plain', then the resulting code will not be delimited at all (this is the
default). If 'mode' is set to 'inline' then inline LaTeX $ $ will be used.
If 'mode' is set to 'equation' or 'equation*', the resulting code will be
enclosed in the 'equation' or 'equation*' environment (remember to import
'amsmath' for 'equation*'), unless the 'itex' option is set. In the latter
case, the ``$$ $$`` syntax is used.
>>> print(latex((2*mu)**Rational(7,2), mode='plain'))
8 \sqrt{2} \mu^{\frac{7}{2}}
>>> print(latex((2*tau)**Rational(7,2), mode='inline'))
$8 \sqrt{2} \tau^{\frac{7}{2}}$
>>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
\begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}
>>> print(latex((2*mu)**Rational(7,2), mode='equation'))
\begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}
itex: Specifies if itex-specific syntax is used, including emitting ``$$ $$``.
>>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
$$8 \sqrt{2} \mu^{\frac{7}{2}}$$
fold_frac_powers: Emit "^{p/q}" instead of "^{\frac{p}{q}}" for fractional
powers.
>>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True))
8 \sqrt{2} \tau^{7/2}
fold_func_brackets: Fold function brackets where applicable.
>>> print(latex((2*tau)**sin(Rational(7,2))))
\left(2 \tau\right)^{\sin{\left (\frac{7}{2} \right )}}
>>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets = True))
\left(2 \tau\right)^{\sin {\frac{7}{2}}}
fold_short_frac: Emit "p / q" instead of "\frac{p}{q}" when the
denominator is simple enough (at most two terms and no powers).
The default value is `True` for inline mode, False otherwise.
>>> print(latex(3*x**2/y))
\frac{3 x^{2}}{y}
>>> print(latex(3*x**2/y, fold_short_frac=True))
3 x^{2} / y
long_frac_ratio: The allowed ratio of the width of the numerator to the
width of the denominator before we start breaking off long fractions.
The default value is 2.
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2))
\frac{\int r\, dr}{2 \pi}
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0))
\frac{1}{2 \pi} \int r\, dr
mul_symbol: The symbol to use for multiplication. Can be one of None,
"ldot", "dot", or "times".
>>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times"))
\left(2 \times \tau\right)^{\sin{\left (\frac{7}{2} \right )}}
inv_trig_style: How inverse trig functions should be displayed. Can be one
of "abbreviated", "full", or "power". Defaults to "abbreviated".
>>> print(latex(asin(Rational(7,2))))
\operatorname{asin}{\left (\frac{7}{2} \right )}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="full"))
\arcsin{\left (\frac{7}{2} \right )}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="power"))
\sin^{-1}{\left (\frac{7}{2} \right )}
mat_str: Which matrix environment string to emit. "smallmatrix", "matrix",
"array", etc. Defaults to "smallmatrix" for inline mode, "matrix" for
matrices of no more than 10 columns, and "array" otherwise.
>>> print(latex(Matrix(2, 1, [x, y])))
\left[\begin{matrix}x\\y\end{matrix}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array"))
\left[\begin{array}{c}x\\y\end{array}\right]
mat_delim: The delimiter to wrap around matrices. Can be one of "[", "(",
or the empty string. Defaults to "[".
>>> print(latex(Matrix(2, 1, [x, y]), mat_delim="("))
\left(\begin{matrix}x\\y\end{matrix}\right)
symbol_names: Dictionary of symbols and the custom strings they should be
emitted as.
>>> print(latex(x**2, symbol_names={x:'x_i'}))
x_i^{2}
``latex`` also supports the builtin container types list, tuple, and
dictionary.
>>> print(latex([2/x, y], mode='inline'))
$\left [ 2 / x, \quad y\right ]$
"""
return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
"""Prints LaTeX representation of the given expression."""
print(latex(expr, **settings))
| bsd-3-clause | 3,123,879,096,282,475,000 | 34.569796 | 155 | 0.511452 | false |
codecollision/DropboxToFlickr | django/contrib/localflavor/br/forms.py | 308 | 5803 | # -*- coding: utf-8 -*-
"""
BR-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, CharField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(\d{2})[-\.]?(\d{4})[-\.]?(\d{4})$')
class BRZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(BRZipCodeField, self).__init__(r'^\d{5}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
class BRPhoneNumberField(Field):
default_error_messages = {
'invalid': _('Phone numbers must be in XX-XXXX-XXXX format.'),
}
def clean(self, value):
super(BRPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\(|\)|\s+)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
class BRStateSelect(Select):
"""
A Select widget that uses a list of Brazilian states/territories
as its choices.
"""
def __init__(self, attrs=None):
from br_states import STATE_CHOICES
super(BRStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class BRStateChoiceField(Field):
"""
A choice field that uses a list of Brazilian states as its choices.
"""
widget = Select
default_error_messages = {
'invalid': _(u'Select a valid brazilian state. That state is not one of the available states.'),
}
def __init__(self, required=True, widget=None, label=None,
initial=None, help_text=None):
super(BRStateChoiceField, self).__init__(required, widget, label,
initial, help_text)
from br_states import STATE_CHOICES
self.widget.choices = STATE_CHOICES
def clean(self, value):
value = super(BRStateChoiceField, self).clean(value)
if value in EMPTY_VALUES:
value = u''
value = smart_unicode(value)
if value == u'':
return value
valid_values = set([smart_unicode(k) for k, v in self.widget.choices])
if value not in valid_values:
raise ValidationError(self.error_messages['invalid'])
return value
def DV_maker(v):
if v >= 2:
return 11 - v
return 0
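# Worked check-digit example (illustrative; the CPF below is the well-known
# valid test number 111.444.777-35):
#   first DV:  10*1 + 9*1 + 8*1 + 7*4 + 6*4 + 5*4 + 4*7 + 3*7 + 2*7 = 162,
#              162 % 11 = 8, DV_maker(8) = 11 - 8 = 3
#   second DV over 1114447773 with weights 11..2: sum = 204,
#              204 % 11 = 6, DV_maker(6) = 11 - 6 = 5
# which reproduces the trailing "-35".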
class BRCPFField(CharField):
"""
This field validate a CPF number or a CPF string. A CPF number is
compounded by XXX.XXX.XXX-VD. The two last digits are check digits.
More information:
http://en.wikipedia.org/wiki/Cadastro_de_Pessoas_F%C3%ADsicas
"""
default_error_messages = {
'invalid': _("Invalid CPF number."),
'max_digits': _("This field requires at most 11 digits or 14 characters."),
'digits_only': _("This field requires only numbers."),
}
def __init__(self, *args, **kwargs):
super(BRCPFField, self).__init__(max_length=14, min_length=11, *args, **kwargs)
def clean(self, value):
"""
Value can be either a string in the format XXX.XXX.XXX-XX or an
11-digit number.
"""
value = super(BRCPFField, self).clean(value)
if value in EMPTY_VALUES:
return u''
orig_value = value[:]
if not value.isdigit():
value = re.sub("[-\.]", "", value)
try:
int(value)
except ValueError:
raise ValidationError(self.error_messages['digits_only'])
if len(value) != 11:
raise ValidationError(self.error_messages['max_digits'])
orig_dv = value[-2:]
new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))])
new_1dv = DV_maker(new_1dv % 11)
value = value[:-2] + str(new_1dv) + value[-1]
new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))])
new_2dv = DV_maker(new_2dv % 11)
value = value[:-1] + str(new_2dv)
if value[-2:] != orig_dv:
raise ValidationError(self.error_messages['invalid'])
return orig_value
class BRCNPJField(Field):
default_error_messages = {
'invalid': _("Invalid CNPJ number."),
'digits_only': _("This field requires only numbers."),
'max_digits': _("This field requires at least 14 digits"),
}
def clean(self, value):
"""
Value can be either a string in the format XX.XXX.XXX/XXXX-XX or a
group of 14 characters.
"""
value = super(BRCNPJField, self).clean(value)
if value in EMPTY_VALUES:
return u''
orig_value = value[:]
if not value.isdigit():
value = re.sub("[-/\.]", "", value)
try:
int(value)
except ValueError:
raise ValidationError(self.error_messages['digits_only'])
if len(value) != 14:
raise ValidationError(self.error_messages['max_digits'])
orig_dv = value[-2:]
new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(5, 1, -1) + range(9, 1, -1))])
new_1dv = DV_maker(new_1dv % 11)
value = value[:-2] + str(new_1dv) + value[-1]
new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(6, 1, -1) + range(9, 1, -1))])
new_2dv = DV_maker(new_2dv % 11)
value = value[:-1] + str(new_2dv)
if value[-2:] != orig_dv:
raise ValidationError(self.error_messages['invalid'])
return orig_value
| bsd-3-clause | 8,973,599,333,357,177,000 | 34.601227 | 104 | 0.579528 | false |
catapult-project/catapult | third_party/gsutil/gslib/vendored/boto/boto/swf/layer1_decisions.py | 153 | 11938 | """
Helper class for creating decision responses.
"""
class Layer1Decisions(object):
"""
    Use this object to build a list of decisions for a decision response.
    Each method call appends a new decision. Retrieve the list of
    decisions from the _data attribute.
"""
def __init__(self):
self._data = []
def schedule_activity_task(self,
activity_id,
activity_type_name,
activity_type_version,
task_list=None,
control=None,
heartbeat_timeout=None,
schedule_to_close_timeout=None,
schedule_to_start_timeout=None,
start_to_close_timeout=None,
input=None):
"""
Schedules an activity task.
:type activity_id: string
:param activity_id: The activityId of the type of the activity
being scheduled.
:type activity_type_name: string
:param activity_type_name: The name of the type of the activity
being scheduled.
:type activity_type_version: string
:param activity_type_version: The version of the type of the
activity being scheduled.
:type task_list: string
:param task_list: If set, specifies the name of the task list in
which to schedule the activity task. If not specified, the
defaultTaskList registered with the activity type will be used.
Note: a task list for this activity task must be specified either
as a default for the activity type or through this field. If
neither this field is set nor a default task list was specified
at registration time then a fault will be returned.
"""
o = {}
o['decisionType'] = 'ScheduleActivityTask'
attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
attrs['activityType'] = {
'name': activity_type_name,
'version': activity_type_version,
}
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if control is not None:
attrs['control'] = control
if heartbeat_timeout is not None:
attrs['heartbeatTimeout'] = heartbeat_timeout
if schedule_to_close_timeout is not None:
attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
if schedule_to_start_timeout is not None:
attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
if start_to_close_timeout is not None:
attrs['startToCloseTimeout'] = start_to_close_timeout
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_activity_task(self, activity_id):
"""
Attempts to cancel a previously scheduled activity task. If
the activity task was scheduled but has not been assigned to a
worker, then it will be canceled. If the activity task was
already assigned to a worker, then the worker will be informed
that cancellation has been requested in the response to
RecordActivityTaskHeartbeat.
"""
o = {}
o['decisionType'] = 'RequestCancelActivityTask'
attrs = o['requestCancelActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
self._data.append(o)
def record_marker(self, marker_name, details=None):
"""
Records a MarkerRecorded event in the history. Markers can be
used for adding custom information in the history for instance
to let deciders know that they do not need to look at the
history beyond the marker event.
"""
o = {}
o['decisionType'] = 'RecordMarker'
attrs = o['recordMarkerDecisionAttributes'] = {}
attrs['markerName'] = marker_name
if details is not None:
attrs['details'] = details
self._data.append(o)
def complete_workflow_execution(self, result=None):
"""
Closes the workflow execution and records a WorkflowExecutionCompleted
event in the history
"""
o = {}
o['decisionType'] = 'CompleteWorkflowExecution'
attrs = o['completeWorkflowExecutionDecisionAttributes'] = {}
if result is not None:
attrs['result'] = result
self._data.append(o)
def fail_workflow_execution(self, reason=None, details=None):
"""
Closes the workflow execution and records a
WorkflowExecutionFailed event in the history.
"""
o = {}
o['decisionType'] = 'FailWorkflowExecution'
attrs = o['failWorkflowExecutionDecisionAttributes'] = {}
if reason is not None:
attrs['reason'] = reason
if details is not None:
attrs['details'] = details
self._data.append(o)
def cancel_workflow_executions(self, details=None):
"""
Closes the workflow execution and records a WorkflowExecutionCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelWorkflowExecution'
attrs = o['cancelWorkflowExecutionsDecisionAttributes'] = {}
if details is not None:
attrs['details'] = details
self._data.append(o)
def continue_as_new_workflow_execution(self,
child_policy=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
start_to_close_timeout=None,
workflow_type_version=None):
"""
Closes the workflow execution and starts a new workflow execution of
the same type using the same workflow id and a unique run Id. A
WorkflowExecutionContinuedAsNew event is recorded in the history.
"""
o = {}
o['decisionType'] = 'ContinueAsNewWorkflowExecution'
attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {}
if child_policy is not None:
attrs['childPolicy'] = child_policy
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if start_to_close_timeout is not None:
attrs['taskStartToCloseTimeout'] = start_to_close_timeout
if workflow_type_version is not None:
attrs['workflowTypeVersion'] = workflow_type_version
self._data.append(o)
def start_timer(self,
start_to_fire_timeout,
timer_id,
control=None):
"""
Starts a timer for this workflow execution and records a TimerStarted
event in the history. This timer will fire after the specified delay
and record a TimerFired event.
"""
o = {}
o['decisionType'] = 'StartTimer'
attrs = o['startTimerDecisionAttributes'] = {}
attrs['startToFireTimeout'] = start_to_fire_timeout
attrs['timerId'] = timer_id
if control is not None:
attrs['control'] = control
self._data.append(o)
def cancel_timer(self, timer_id):
"""
Cancels a previously started timer and records a TimerCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelTimer'
attrs = o['cancelTimerDecisionAttributes'] = {}
attrs['timerId'] = timer_id
self._data.append(o)
def signal_external_workflow_execution(self,
workflow_id,
signal_name,
run_id=None,
control=None,
input=None):
"""
Requests a signal to be delivered to the specified external workflow
execution and records a SignalExternalWorkflowExecutionInitiated
event in the history.
"""
o = {}
o['decisionType'] = 'SignalExternalWorkflowExecution'
attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
attrs['signalName'] = signal_name
if run_id is not None:
attrs['runId'] = run_id
if control is not None:
attrs['control'] = control
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_external_workflow_execution(self,
workflow_id,
control=None,
run_id=None):
"""
Requests that a request be made to cancel the specified
external workflow execution and records a
RequestCancelExternalWorkflowExecutionInitiated event in the
history.
"""
o = {}
o['decisionType'] = 'RequestCancelExternalWorkflowExecution'
attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
if control is not None:
attrs['control'] = control
if run_id is not None:
attrs['runId'] = run_id
self._data.append(o)
def start_child_workflow_execution(self,
workflow_type_name,
workflow_type_version,
workflow_id,
child_policy=None,
control=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
task_start_to_close_timeout=None):
"""
Requests that a child workflow execution be started and
records a StartChildWorkflowExecutionInitiated event in the
history. The child workflow execution is a separate workflow
execution with its own history.
"""
o = {}
o['decisionType'] = 'StartChildWorkflowExecution'
attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowType'] = {
'name': workflow_type_name,
'version': workflow_type_version,
}
attrs['workflowId'] = workflow_id
if child_policy is not None:
attrs['childPolicy'] = child_policy
if control is not None:
attrs['control'] = control
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if task_start_to_close_timeout is not None:
attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout
self._data.append(o)
| bsd-3-clause | -1,264,611,811,870,793,700 | 40.595819 | 84 | 0.550176 | false |
abzaloid/maps | django-project/lib/python2.7/site-packages/django/core/servers/fastcgi.py | 170 | 6631 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
import importlib
import os
import sys
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default %(protocol)s)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default %(method)s).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads (default %(maxspare)s).
minspare=NUMBER min number of spare processes / threads (default %(minspare)s).
maxchildren=NUMBER hard limit number of processes / threads (default %(maxchildren)s).
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing (default %(workdir)s).
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
""" % FASTCGI_OPTIONS
def fastcgi_help(message=None):
print(FASTCGI_HELP)
if message:
print(message)
return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup # NOQA
except ImportError as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.stderr.write(" Unable to load the flup package. In order to run django\n")
sys.stderr.write(" as a FastCGI application, you will need to get flup from\n")
sys.stderr.write(" http://www.saddi.com/software/flup/ If you've already\n")
sys.stderr.write(" installed flup, then make sure you have it in your PYTHONPATH.\n")
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or "
"thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except Exception:
print("Can't import flup." + flup_module)
return False
# Prep up and go
from django.core.servers.basehttp import get_internal_wsgi_application
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize "
"parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
with open(options["pidfile"], "w") as fp:
fp.write("%d\n" % os.getpid())
WSGIServer(get_internal_wsgi_application(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| mit | 3,992,909,658,636,593,000 | 34.459893 | 94 | 0.625848 | false |
sajuptpm/murano | murano/dsl/exceptions.py | 1 | 3632 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class InternalFlowException(Exception):
pass
class ReturnException(InternalFlowException):
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
class BreakException(InternalFlowException):
pass
class ContinueException(InternalFlowException):
pass
class DslInvalidOperationError(Exception):
pass
class NoMethodFound(Exception):
def __init__(self, name):
super(NoMethodFound, self).__init__('Method "%s" is not found' % name)
class NoClassFound(Exception):
def __init__(self, name):
super(NoClassFound, self).__init__('Class "%s" is not found' % name)
class NoPackageFound(Exception):
def __init__(self, name):
super(NoPackageFound, self).__init__(
'Package "%s" is not found' % name)
class NoPackageForClassFound(Exception):
def __init__(self, name):
super(NoPackageForClassFound, self).__init__('Package for class "%s" '
'is not found' % name)
class NoObjectFoundError(Exception):
def __init__(self, object_id):
super(NoObjectFoundError, self).__init__(
'Object "%s" is not found in object store' % object_id)
class AmbiguousMethodName(Exception):
def __init__(self, name):
super(AmbiguousMethodName, self).__init__(
'Found more that one method "%s"' % name)
class DslContractSyntaxError(Exception):
pass
class ContractViolationException(Exception):
pass
class ValueIsMissingError(Exception):
pass
class DslSyntaxError(Exception):
pass
class PropertyAccessError(Exception):
pass
class AmbiguousPropertyNameError(PropertyAccessError):
def __init__(self, name):
super(AmbiguousPropertyNameError, self).__init__(
'Found more that one property "%s"' % name)
class NoWriteAccess(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccess, self).__init__(
'Property "%s" is immutable to the caller' % name)
class NoWriteAccessError(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccessError, self).__init__(
'Property "%s" is immutable to the caller' % name)
class PropertyReadError(PropertyAccessError):
    def __init__(self, name, murano_class):
        super(PropertyReadError, self).__init__(
            'Property "%s" in class "%s" cannot be read' %
            (name, murano_class.name))
class PropertyWriteError(PropertyAccessError):
    def __init__(self, name, murano_class):
        super(PropertyWriteError, self).__init__(
            'Property "%s" in class "%s" cannot be written' %
            (name, murano_class.name))
class UninitializedPropertyAccessError(PropertyAccessError):
    def __init__(self, name, murano_class):
        super(UninitializedPropertyAccessError, self).__init__(
            'Access to uninitialized property '
            '"%s" in class "%s" is forbidden' % (name, murano_class.name))
| apache-2.0 | 4,971,076,513,574,529,000 | 26.725191 | 78 | 0.651982 | false |
rafafigueroa/amrws | src/amrpkg/scripts/virtual_tracking.py | 1 | 5059 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseStamped
from tf.transformations import euler_from_quaternion
import numpy as np
def minAngle(ang):
return np.arctan2(np.sin(ang), np.cos(ang))
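# Hedged example: minAngle(np.pi + 0.1) ~= 0.1 - np.pi, i.e. angles are
# wrapped into the (-pi, pi] range by the arctan2 identity above.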
def orientation_to_yaw(orientation):
quat_list = [0, 0, 0, 0]
quat_list[0] = orientation.x
quat_list[1] = orientation.y
quat_list[2] = orientation.z
quat_list[3] = orientation.w
(roll, pitch, yaw) = euler_from_quaternion(quat_list)
return yaw
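# Hedged example: the identity quaternion (x=y=z=0, w=1) yields yaw 0.0.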
class SimModel(object):
"""Provides a consistent usage of simulation models"""
def __init__(self, control):
self.control = control
class SimMain(object):
def __init__(self, model, hz = 50.0):
self.state = [None]*6
self.model = model
rospy.init_node('amr_control')
self.rate = rospy.Rate(hz)
self.pub = rospy.Publisher('/mobile_base/commands/velocity', Twist,
queue_size=10)
rospy.Subscriber('/odom', Odometry, self.state_callback)
rospy.Subscriber('/virtual_agent/pose', PoseStamped,
self.virtual_state_callback)
print("simulation initialized")
#TODO: Wait for topic
def state_callback(self, robot_od):
x = robot_od.pose.pose.position.x
y = robot_od.pose.pose.position.y
h = orientation_to_yaw(robot_od.pose.pose.orientation)
#print 'state', x, y , h
self.state[0] = x
self.state[1] = y
self.state[2] = h
def virtual_state_callback(self, robot_ps):
xr = robot_ps.pose.position.x
yr = robot_ps.pose.position.y
hr = orientation_to_yaw(robot_ps.pose.orientation)
self.state[3] = xr
self.state[4] = yr
self.state[5] = hr
#print 'virtual', xr, yr, hr
def run(self):
print("simulation running")
while not rospy.is_shutdown():
if (self.state[0] is not None) and \
(self.state[3] is not None):
u = self.model.control(self.state)
v = u[0]
w = u[1]
robot_tw = Twist()
robot_tw.linear.x = v
robot_tw.angular.z = w
self.pub.publish(robot_tw)
self.rate.sleep()
#TODO: Make general, currently copy/paste from virtual
vr = 0.5
wr = -0.05
def control_virtual_linear(X):
x = X[0]
y = X[1]
h = X[2]
xr = X[3]
yr = X[4]
hr = X[5]
# (s+2*xi*alpha)*(s**2 + 2*xi*alpha*s + alpha**2)
# poles at -p1r +- p1i*j and -p2
# (s+p2)*(s+p1r+p1i)*(s+p1r-p1i)
# (s+p2)*(s**2 + 2*p1r*s + p1r**2 + p1i**2)
# 2*xi*alpha = p2
# 2*xi*alpha = 2*p1r
# alpha**2 = p1r**2 + p1i**2
# for p1r = 1, p1i = 1, p2 = 2*p1r = 2
# Linear
p1r = 1
p1i = 0.3
p2 = 2*p1r
alpha = np.sqrt(p1r**2+p1i**2)
xi = p2/float(2*alpha)
b = (alpha**2 - wr**2)/float(vr**2)
k1 = 2*xi*alpha
k2 = b * np.abs(vr)
k3 = k1
ex = xr - x
ey = yr - y
eh = minAngle(hr - h)
e1 = ex*np.cos(h) + ey*np.sin(h)
e2 = -ex*np.sin(h) + ey*np.cos(h)
e3 = eh
u1 = -k1 * e1
u2 = -k2 * np.sign(vr) * e2 - k3 * e3
v = vr * np.cos(e3) - u1
w = wr - u2
Erms = np.sqrt(e1**2 + e2**2 + e3**2)
print 'Erms:', Erms, 'v', v, 'w', w
return [v, w]
def control_virtual_nonlinear(X):
x = X[0]
y = X[1]
h = X[2]
xr = X[3]
yr = X[4]
hr = X[5]
# (s+2*xi*alpha)*(s**2 + 2*xi*alpha*s + alpha**2)
# poles at -p1r +- p1i*j and -p2
# (s+p2)*(s+p1r+p1i)*(s+p1r-p1i)
# (s+p2)*(s**2 + 2*p1r*s + p1r**2 + p1i**2)
# 2*xi*alpha = p2
# 2*xi*alpha = 2*p1r
# alpha**2 = p1r**2 + p1i**2
# for p1r = 1, p1i = 1, p2 = 2*p1r = 2
# Nonlinear
p1r = 0.5
p1i = 0.1
p2 = 2*p1r
alpha = np.sqrt(p1r**2+p1i**2)
xi = p2/float(2*alpha)
b = (alpha**2 - wr**2)/float(vr**2)
k1 = 2*xi*np.sqrt(wr**2 + b*vr**2)
k2 = b * np.abs(vr)
k3 = k1
k4 = b
ex = xr - x
ey = yr - y
eh = minAngle(hr - h)
e1 = ex*np.cos(h) + ey*np.sin(h)
e2 = -ex*np.sin(h) + ey*np.cos(h)
e3 = eh
u1 = -k1 * e1
u2 = -k4 * vr * np.sin(e3)/(e3+0.001) * e2 - k3 * e3
v = vr * np.cos(e3) - u1
w = wr - u2
Erms = np.sqrt(e1**2 + e2**2 + e3**2)
print 'Erms:', Erms, 'v', v, 'w', w
return [v, w]
if __name__ == '__main__':
sim_model = SimModel(control = control_virtual_nonlinear)
sim = SimMain(sim_model)
try:
sim.run()
except rospy.ROSInterruptException:
pass
| mit | 5,498,423,566,118,179,000 | 23.921182 | 76 | 0.478553 | false |
knehez/edx-platform | lms/djangoapps/shoppingcart/urls.py | 159 | 1390 | from django.conf.urls import patterns, url
from django.conf import settings
urlpatterns = patterns(
'shoppingcart.views',
url(r'^postpay_callback/$', 'postpay_callback'), # Both the ~accept and ~reject callback pages are handled here
url(r'^receipt/(?P<ordernum>[0-9]*)/$', 'show_receipt'),
url(r'^donation/$', 'donate', name='donation'),
url(r'^csv_report/$', 'csv_report', name='payment_csv_report'),
# These following URLs are only valid if the ENABLE_SHOPPING_CART feature flag is set
url(r'^$', 'show_cart'),
url(r'^clear/$', 'clear_cart'),
url(r'^remove_item/$', 'remove_item'),
url(r'^add/course/{}/$'.format(settings.COURSE_ID_PATTERN), 'add_course_to_cart', name='add_course_to_cart'),
url(r'^register/redeem/(?P<registration_code>[0-9A-Za-z]+)/$', 'register_code_redemption', name='register_code_redemption'),
url(r'^use_code/$', 'use_code'),
url(r'^update_user_cart/$', 'update_user_cart'),
url(r'^reset_code_redemption/$', 'reset_code_redemption'),
url(r'^billing_details/$', 'billing_details', name='billing_details'),
url(r'^verify_cart/$', 'verify_cart'),
)
if settings.FEATURES.get('ENABLE_PAYMENT_FAKE'):
from shoppingcart.tests.payment_fake import PaymentFakeView
urlpatterns += patterns(
'shoppingcart.tests.payment_fake',
url(r'^payment_fake', PaymentFakeView.as_view()),
)
| agpl-3.0 | -2,123,860,830,304,580,400 | 46.931034 | 128 | 0.658273 | false |
cxysteven/Paddle | python/paddle/v2/dataset/imikolov.py | 1 | 3909 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
imikolov's simple dataset.
This module will download dataset from
http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set
into paddle reader creators.
"""
import paddle.v2.dataset.common
import collections
import tarfile
__all__ = ['train', 'test', 'build_dict']
URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
MD5 = '30177ea32e27c525793142b6bf2c8e2d'
def word_count(f, word_freq=None):
if word_freq is None:
word_freq = collections.defaultdict(int)
for l in f:
for w in l.strip().split():
word_freq[w] += 1
word_freq['<s>'] += 1
word_freq['<e>'] += 1
return word_freq
def build_dict():
"""
Build a word dictionary from the corpus, Keys of the dictionary are words,
and values are zero-based IDs of these words.
"""
train_filename = './simple-examples/data/ptb.train.txt'
test_filename = './simple-examples/data/ptb.valid.txt'
with tarfile.open(
paddle.v2.dataset.common.download(
paddle.v2.dataset.imikolov.URL, 'imikolov',
paddle.v2.dataset.imikolov.MD5)) as tf:
trainf = tf.extractfile(train_filename)
testf = tf.extractfile(test_filename)
word_freq = word_count(testf, word_count(trainf))
if '<unk>' in word_freq:
# remove <unk> for now, since we will set it as last index
del word_freq['<unk>']
TYPO_FREQ = 50
word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items())
word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*word_freq_sorted))
word_idx = dict(zip(words, xrange(len(words))))
word_idx['<unk>'] = len(words)
return word_idx
def reader_creator(filename, word_idx, n):
def reader():
with tarfile.open(
paddle.v2.dataset.common.download(
paddle.v2.dataset.imikolov.URL, 'imikolov',
paddle.v2.dataset.imikolov.MD5)) as tf:
f = tf.extractfile(filename)
UNK = word_idx['<unk>']
for l in f:
l = ['<s>'] + l.strip().split() + ['<e>']
if len(l) >= n:
l = [word_idx.get(w, UNK) for w in l]
for i in range(n, len(l) + 1):
yield tuple(l[i - n:i])
return reader
def train(word_idx, n):
"""
imikolov training set creator.
It returns a reader creator, each sample in the reader is a word ID
tuple.
:param word_idx: word dictionary
:type word_idx: dict
:param n: sliding window size
:type n: int
:return: Training reader creator
:rtype: callable
"""
return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n)
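# Illustrative usage (hedged; the first call downloads the PTB tarball):
#
#   word_idx = build_dict()
#   reader = train(word_idx, 5)
#   sample = next(iter(reader()))  # a 5-tuple of word IDs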
def test(word_idx, n):
"""
imikolov test set creator.
It returns a reader creator, each sample in the reader is a word ID
tuple.
:param word_idx: word dictionary
:type word_idx: dict
:param n: sliding window size
:type n: int
:return: Test reader creator
:rtype: callable
"""
return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n)
def fetch():
paddle.v2.dataset.common.download(URL, "imikolov", MD5)
| apache-2.0 | 2,374,105,054,414,072,300 | 29.779528 | 79 | 0.618828 | false |
peterfpeterson/mantid | scripts/Muon/GUI/Common/calculate_pair_and_group.py | 3 | 5823 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import Muon.GUI.Common.utilities.algorithm_utils as algorithm_utils
from Muon.GUI.Common.utilities.run_string_utils import run_list_to_string
from Muon.GUI.Common.muon_pair import MuonPair
from typing import Iterable
def calculate_group_data(context, group, run, rebin, workspace_name, periods):
processed_data = get_pre_process_workspace_name(run, context.data_context.instrument)
params = _get_MuonGroupingCounts_parameters(group, periods)
params["InputWorkspace"] = processed_data
group_data = algorithm_utils.run_MuonGroupingCounts(params, workspace_name)
return group_data
def calculate_pair_data(pair: MuonPair, forward_group: str, backward_group: str, output_workspace_name: str):
params = _get_MuonPairingAsymmetry_parameters(pair, forward_group, backward_group)
pair_data = algorithm_utils.run_MuonPairingAsymmetry(params, output_workspace_name)
return pair_data
def estimate_group_asymmetry_data(context, group, run, rebin, workspace_name, unormalised_workspace_name, periods):
processed_data = get_pre_process_workspace_name(run, context.data_context.instrument)
params = _get_MuonGroupingAsymmetry_parameters(context, group, run, periods)
params["InputWorkspace"] = processed_data
group_asymmetry, group_asymmetry_unnorm = algorithm_utils.run_MuonGroupingAsymmetry(params, workspace_name,
unormalised_workspace_name)
return group_asymmetry, group_asymmetry_unnorm
def run_pre_processing(context, run, rebin):
params = _get_pre_processing_params(context, run, rebin)
params["InputWorkspace"] = context.data_context.loaded_workspace_as_group(run)
processed_data = algorithm_utils.run_MuonPreProcess(params)
return processed_data
def get_pre_process_workspace_name(run: Iterable[int], instrument: str) -> str:
workspace_name = "".join(["__", instrument, run_list_to_string(run), "_pre_processed_data"])
return workspace_name
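# Hedged example: for instrument "MUSR" and runs [62260, 62261] this yields
# "__MUSR62260-62261_pre_processed_data", assuming run_list_to_string
# collapses consecutive runs into a "62260-62261" range.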
def _get_pre_processing_params(context, run, rebin):
pre_process_params = {}
try:
if context.gui_context['FirstGoodDataFromFile']:
time_min = context.data_context.get_loaded_data_for_run(run)["FirstGoodData"]
else:
time_min = context.gui_context['FirstGoodData']
pre_process_params["TimeMin"] = time_min
except KeyError:
pass
try:
if context.gui_context['TimeZeroFromFile']:
time_offset = 0.0
else:
time_offset = context.data_context.get_loaded_data_for_run(run)["TimeZero"] - context.gui_context[
'TimeZero']
pre_process_params["TimeOffset"] = time_offset
except KeyError:
pass
if rebin:
_setup_rebin_options(context, pre_process_params, run)
try:
dead_time_table = context.corrections_context.current_dead_time_table_name_for_run(
context.data_context.instrument, run)
if dead_time_table is not None:
pre_process_params["DeadTimeTable"] = dead_time_table
except KeyError:
pass
pre_process_params["OutputWorkspace"] = get_pre_process_workspace_name(run, context.data_context.instrument)
return pre_process_params
def _setup_rebin_options(context, pre_process_params, run):
try:
if context.gui_context['RebinType'] == 'Variable' and context.gui_context["RebinVariable"]:
pre_process_params["RebinArgs"] = context.gui_context["RebinVariable"]
except KeyError:
pass
try:
if context.gui_context['RebinType'] == 'Fixed' and context.gui_context["RebinFixed"]:
x_data = context.data_context._loaded_data.get_data(run=run, instrument=context.data_context.instrument
)['workspace']['OutputWorkspace'][0].workspace.dataX(0)
original_step = x_data[1] - x_data[0]
pre_process_params["RebinArgs"] = float(context.gui_context["RebinFixed"]) * original_step
except KeyError:
pass
def _get_MuonGroupingCounts_parameters(group, periods):
params = {}
params["SummedPeriods"] = periods
if group:
params["GroupName"] = group.name
params["Grouping"] = ",".join([str(i) for i in group.detectors])
return params
def _get_MuonGroupingAsymmetry_parameters(context, group, run, periods):
params = {}
if 'GroupRangeMin' in context.gui_context:
params['AsymmetryTimeMin'] = context.gui_context['GroupRangeMin']
else:
params['AsymmetryTimeMin'] = context.data_context.get_loaded_data_for_run(run)["FirstGoodData"]
if 'GroupRangeMax' in context.gui_context:
params['AsymmetryTimeMax'] = context.gui_context['GroupRangeMax']
else:
params['AsymmetryTimeMax'] = max(
context.data_context.get_loaded_data_for_run(run)['OutputWorkspace'][0].workspace.dataX(0))
params["SummedPeriods"] = periods
if group:
params["GroupName"] = group.name
params["Grouping"] = ",".join([str(i) for i in group.detectors])
return params
def _get_MuonPairingAsymmetry_parameters(pair: MuonPair, forward_group: str, backward_group: str):
params = {}
if pair:
params["SpecifyGroupsManually"] = False
params["PairName"] = pair.name
params["InputWorkspace1"] = forward_group
params["InputWorkspace2"] = backward_group
params["Alpha"] = str(pair.alpha)
return params
| gpl-3.0 | 4,403,268,382,402,388,500 | 37.058824 | 119 | 0.673536 | false |
riteshshrv/django | django/http/__init__.py | 341 | 1103 | from django.http.cookie import SimpleCookie, parse_cookie
from django.http.request import (
HttpRequest, QueryDict, RawPostDataException, UnreadablePostError,
)
from django.http.response import (
BadHeaderError, FileResponse, Http404, HttpResponse,
HttpResponseBadRequest, HttpResponseForbidden, HttpResponseGone,
HttpResponseNotAllowed, HttpResponseNotFound, HttpResponseNotModified,
HttpResponsePermanentRedirect, HttpResponseRedirect,
HttpResponseServerError, JsonResponse, StreamingHttpResponse,
)
from django.http.utils import conditional_content_removal
__all__ = [
'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict',
'RawPostDataException', 'UnreadablePostError',
'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect',
'HttpResponsePermanentRedirect', 'HttpResponseNotModified',
'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound',
'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError',
'Http404', 'BadHeaderError', 'JsonResponse', 'FileResponse',
'conditional_content_removal',
]
| bsd-3-clause | -4,603,001,301,994,883,600 | 46.956522 | 78 | 0.789665 | false |
angdraug/nova | nova/tests/objects/test_instance_info_cache.py | 32 | 4744 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.network import model as network_model
from nova.objects import instance_info_cache
from nova.tests.objects import test_objects
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': 'fake-uuid',
'network_info': '[]',
}
class _TestInstanceInfoCacheObject(object):
def test_get_by_instance_uuid(self):
nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
self.mox.StubOutWithMock(db, 'instance_info_cache_get')
db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(
dict(fake_info_cache, network_info=nwinfo.json()))
self.mox.ReplayAll()
obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
self.context, 'fake-uuid')
self.assertEqual(obj.instance_uuid, 'fake-uuid')
self.assertEqual(obj.network_info, nwinfo)
self.assertRemotes()
def test_get_by_instance_uuid_no_entries(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_get')
db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
exception.InstanceInfoCacheNotFound,
instance_info_cache.InstanceInfoCache.get_by_instance_uuid,
self.context, 'fake-uuid')
def test_new(self):
obj = instance_info_cache.InstanceInfoCache.new(self.context,
'fake-uuid')
self.assertEqual(set(['instance_uuid', 'network_info']),
obj.obj_what_changed())
self.assertEqual('fake-uuid', obj.instance_uuid)
self.assertIsNone(obj.network_info)
def _save_helper(self, cell_type, update_cells):
obj = instance_info_cache.InstanceInfoCache()
cells_api = cells_rpcapi.CellsAPI()
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
self.mox.StubOutWithMock(cells_opts, 'get_cell_type')
self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
use_mock_anything=True)
self.mox.StubOutWithMock(cells_api,
'instance_info_cache_update_at_top')
nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
db.instance_info_cache_update(
self.context, 'fake-uuid',
{'network_info': nwinfo.json()}).AndReturn('foo')
if update_cells:
cells_opts.get_cell_type().AndReturn(cell_type)
if cell_type == 'compute':
cells_rpcapi.CellsAPI().AndReturn(cells_api)
cells_api.instance_info_cache_update_at_top(
self.context, 'foo')
self.mox.ReplayAll()
obj._context = self.context
obj.instance_uuid = 'fake-uuid'
obj.network_info = nwinfo
obj.save(update_cells=update_cells)
def test_save_with_update_cells_and_compute_cell(self):
self._save_helper('compute', True)
def test_save_with_update_cells_and_non_compute_cell(self):
self._save_helper(None, True)
def test_save_without_update_cells(self):
self._save_helper(None, False)
def test_refresh(self):
obj = instance_info_cache.InstanceInfoCache.new(self.context,
'fake-uuid1')
self.mox.StubOutWithMock(db, 'instance_info_cache_get')
db.instance_info_cache_get(self.context, 'fake-uuid1').AndReturn(
fake_info_cache)
self.mox.ReplayAll()
obj.refresh()
self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid)
class TestInstanceInfoCacheObject(test_objects._LocalTest,
_TestInstanceInfoCacheObject):
pass
class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest,
_TestInstanceInfoCacheObject):
pass
| apache-2.0 | 3,935,798,943,946,205,700 | 39.547009 | 78 | 0.627319 | false |
apt-helion/viperidae | data/models.py | 1 | 2405 | import datetime
from peewee import *
from pymongo import MongoClient
from .config import Config
database = Config.DATABASE
# monkey patch the DateTimeField to add support for the isoformt which is what
# peewee exports as from DataSet
DateTimeField.formats.append('%Y-%m-%dT%H:%M:%S')
DateField.formats.append('%Y-%m-%dT%H:%M:%S')
class UnknownField(object):
def __init__(self, *_, **__): pass
class BaseModel(Model):
# Example usage
# doc = AdminDocument.create()
# doc.apply(request.form)
# doc.apply(request.json)
# doc.apply(request.json, required=['filename'], dates=['uploaddate'])
def apply_request(self, source, ignore = None, required = None, dates = None):
for field in self._meta.get_sorted_fields():
data = source.get(field)
if field == "id": continue
if field in ignore: continue
# Verify in required_fields
if field in required and data == None:
return {'error': 'Empty required field'}
if field in dates:
data = "" # strp==]===
if data is None or data == "": continue
self.__dict__[field] = data
return ""
class Meta:
database = database
class User(BaseModel):
user = CharField(column_name='user_id', null=False, primary_key=True)
username = CharField(column_name='username', null=False)
password = CharField(column_name='password', null=False)
email = CharField(column_name='email', null=False)
class Meta:
table_name = 'Users'
class Client(BaseModel):
client = CharField(column_name='client_id', null=False, primary_key=True)
secret = CharField(column_name='client_secret', null=False)
name = CharField(column_name='name', null=False)
website = CharField(column_name='website', null=False)
description = CharField(column_name='description', null=False)
user = ForeignKeyField(
column_name='user_id',
field='user',
model=User,
null=False)
@classmethod
def get_pages(cls, name):
mongo_client = MongoClient('localhost', 27017)
database = mongo_client.pages
collection = database[name]
pages = []
for page in collection.find():
pages.append(page)
return pages
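    # Illustrative call (hedged; assumes a local MongoDB on port 27017 with a
    # "pages" database and a collection named after the client):
    #   pages = Client.get_pages('example-site')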
class Meta:
table_name = 'Clients'
| gpl-3.0 | 6,635,790,639,644,045,000 | 29.0625 | 82 | 0.613721 | false |
brianmoose/civet | client/tests/test_JobRunner.py | 2 | 13950 |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.test import SimpleTestCase
from django.test import override_settings
from ci.tests import utils as test_utils
from client import JobRunner, BaseClient
from client.tests import utils
import os, platform
from distutils import spawn
from mock import patch
import subprocess
BaseClient.setup_logger()
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
@override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()])
class Tests(SimpleTestCase):
def setUp(self):
self.build_root = "/foo/bar"
os.environ["BUILD_ROOT"] = self.build_root
self.message_q = Queue()
self.command_q = Queue()
def create_runner(self):
client_info = utils.default_client_info()
job_info = utils.create_job_dict()
runner = JobRunner.JobRunner(client_info, job_info, self.message_q, self.command_q)
self.assertEqual(runner.canceled, False)
self.assertEqual(runner.stopped, False)
self.assertEqual(runner.global_env["var_with_root"], "%s/bar" % self.build_root)
self.assertEqual(runner.job_data["steps"][0]["environment"]["step_var_with_root"], "%s/foo" % self.build_root)
return runner
def check_job_results(self, results, runner, complete=True, canceled=False, failed=False):
self.assertEqual(results['complete'], complete)
self.assertEqual(results['canceled'], canceled)
self.assertEqual(results['failed'], failed)
self.assertIn('seconds', results)
self.assertEqual(results['client_name'], runner.client_info["client_name"])
self.assertEqual(self.message_q.qsize(), 1)
msg = self.message_q.get(block=False)
self.assertEqual(len(msg), 4)
server = runner.client_info["server"]
self.assertEqual(msg["server"], server)
self.assertTrue(msg["url"].startswith(server))
self.assertEqual(msg["job_id"], runner.job_data["job_id"])
self.assertEqual(msg["payload"], results)
@patch.object(JobRunner.JobRunner, 'run_step')
def test_run_job(self, mock_run_step):
r = self.create_runner()
run_step_results = {'canceled': False, 'exit_status': 0}
mock_run_step.return_value = run_step_results
# normal run
results = r.run_job()
self.check_job_results(results, r)
# test bad exit_status
run_step_results['exit_status'] = 1
mock_run_step.return_value = run_step_results
self.assertEqual(r.job_data["steps"][0]["abort_on_failure"], True)
results = r.run_job()
self.check_job_results(results, r, failed=True)
# bad exit_status but don't abort
r.job_data["steps"][0]["abort_on_failure"] = False
results = r.run_job()
self.check_job_results(results, r)
# test canceled
r.canceled = True
results = r.run_job()
self.check_job_results(results, r, canceled=True)
# test stopped
r.canceled = False
r.stopped = True
results = r.run_job()
self.check_job_results(results, r)
# test error
r.canceled = False
r.stopped = False
r.error = True
results = r.run_job()
self.check_job_results(results, r, canceled=True, failed=True)
def test_update_step(self):
r = self.create_runner()
step = {'step_num': 1, 'stepresult_id': 1}
chunk_data = {"message": "message"}
for stage in ["start", "complete", "update"]:
chunk_data["message"] = stage
r.update_step(stage, step, chunk_data)
self.assertEqual(self.message_q.qsize(), 1)
msg = self.message_q.get(block=False)
self.assertEqual(len(msg), 4)
server = r.client_info["server"]
self.assertEqual(msg["server"], server)
self.assertTrue(msg["url"].startswith(server))
self.assertIn(stage, msg["url"])
self.assertEqual(msg["job_id"], r.job_data["job_id"])
self.assertEqual(msg["payload"], chunk_data)
def test_get_output_from_queue(self):
r = self.create_runner()
q0 = {"msg": 1}
q1 = {"msg": 2}
self.message_q.put(q0)
self.message_q.put(q1)
output = r.get_output_from_queue(self.message_q)
self.assertEqual(len(output), 2)
self.assertEqual(output[0], q0)
self.assertEqual(output[1], q1)
def test_read_command(self):
r = self.create_runner()
        # Test cancel command targeted at this job
        cmd = {"job_id": r.job_data["job_id"], "command": "cancel"}
self.assertEqual(r.canceled, False)
self.command_q.put(cmd)
r.read_command()
self.assertEqual(r.canceled, True)
# Test stop command
cmd["command"] = "stop"
self.command_q.put(cmd)
r.canceled = False
self.assertEqual(r.stopped, False)
r.read_command()
self.assertEqual(r.stopped, True)
# Test unknown command
cmd["command"] = "unknown"
self.command_q.put(cmd)
r.stopped = False
r.read_command()
self.assertEqual(r.stopped, False)
self.assertEqual(r.canceled, False)
# Test bad command message
self.command_q.put({})
r.read_command()
def test_read_process_output(self):
r = self.create_runner()
r.client_info["update_step_time"] = 1
with JobRunner.temp_file() as script_file:
script = b"for i in $(seq 5);do echo start $i; sleep 1; echo done $i; done"
script_file.write(script)
script_file.close()
with open(os.devnull, "wb") as devnull:
proc = r.create_process(script_file.name, {}, devnull)
# standard run of the subprocess, just check we get all the output
out = r.read_process_output(proc, r.job_data["steps"][0], {})
proc.wait()
test_out = ""
self.assertGreater(self.message_q.qsize(), 3)
msg_list = []
try:
while True:
l = self.message_q.get(block=False)
msg_list.append(l)
except Empty:
pass
for i in range(1, 6):
start = "start {}\n".format(i)
done = "done {}\n".format(i)
if i < 4:
# only do this test for the first few
# since there is no guarentee that update_step()
# will get called for all of them before the
# process terminates
found_start = False
found_done = False
for msg in msg_list:
if start.strip() in msg["payload"]["output"]:
found_start = True
if done.strip() in msg["payload"]["output"]:
found_done = True
self.assertTrue(found_start)
self.assertTrue(found_done)
test_out += start + done
self.assertEqual(test_out, out["output"])
self.assertEqual(out["complete"], True)
self.assertGreater(out["time"], 1)
proc = r.create_process(script_file.name, {}, devnull)
# Test cancel while reading output
self.command_q.put({"job_id": r.job_data["job_id"], "command": "cancel"})
self.assertEqual(r.canceled, False)
r.read_process_output(proc, r.job_data["steps"][0], {})
proc.wait()
self.assertEqual(r.canceled, True)
def test_kill_job(self):
with JobRunner.temp_file() as script:
script.write(b"sleep 30")
script.close()
with open(os.devnull, "wb") as devnull:
r = self.create_runner()
proc = r.create_process(script.name, {}, devnull)
r.kill_job(proc)
self.assertEqual(proc.poll(), -15) # SIGTERM
proc.wait()
# get some coverage when the proc is already dead
r.kill_job(proc)
# the kill path for windows is different, just get some
# coverage because we don't currently have a windows box
# to test on
with patch.object(platform, 'system') as mock_system:
mock_system.side_effect = ["linux", "Windows"]
proc = r.create_process(script.name, {}, devnull)
r.kill_job(proc)
with patch.object(spawn, 'find_executable') as mock_find:
mock_system.side_effect = ["Windows"]
mock_find.return_value = True
r.kill_job(proc)
# mimic not being able to kill the job
with patch.object(subprocess.Popen, 'poll') as mock_poll, patch.object(subprocess.Popen, 'kill') as mock_kill:
mock_poll.side_effect = [True, None, None]
mock_kill.return_value = False
proc = r.create_process(script.name, {}, devnull)
r.kill_job(proc)
def test_run_step(self):
r = self.create_runner()
r.client_info["update_step_time"] = 1
step_env_orig = r.job_data["steps"][0]["environment"].copy()
global_env_orig = r.global_env.copy()
results = r.run_step(r.job_data["steps"][0])
self.assertIn('test_output1', results['output'])
self.assertIn('test_output2', results['output'])
self.assertEqual(results['exit_status'], 0)
self.assertEqual(results['canceled'], False)
self.assertGreater(results['time'], 1)
# Make sure run_step doesn't touch the environment
self.assertEqual(r.global_env, global_env_orig)
self.assertEqual(r.job_data["steps"][0]["environment"], step_env_orig)
# Test output size limits work
r.max_output_size = 10
results = r.run_step(r.job_data["steps"][0])
self.assertIn('command not found', results['output'])
self.assertIn('Output size exceeded limit', results['output'])
self.command_q.put({"job_id": r.job_data["job_id"], "command": "cancel"})
results = r.run_step(r.job_data["steps"][0])
self.assertEqual(results['canceled'], True)
self.assertEqual(r.canceled, True)
# just get some coverage
with patch.object(JobRunner.JobRunner, "read_process_output") as mock_proc:
r.canceled = False
mock_proc.side_effect = Exception("Oh no!")
results = r.run_step(r.job_data["steps"][0])
self.assertEqual(results['canceled'], True)
self.assertEqual(r.canceled, True)
# Simulate out of disk space error
with patch.object(JobRunner.JobRunner, "run_step_process") as mock_run:
r.canceled = False
mock_run.side_effect = IOError("Oh no!")
results = r.run_step(r.job_data["steps"][0])
self.assertEqual(results['exit_status'], 1)
self.assertEqual(r.canceled, False)
self.assertEqual(r.error, True)
@patch.object(platform, 'system')
def test_run_step_platform(self, mock_system):
r = self.create_runner()
r.client_info["update_step_time"] = 1
# Don't have a Windows box to test on but
# we can get some basic coverage
mock_system.return_value = "Windows"
# the windows command won't work
data = r.run_step(r.job_data["steps"][0])
self.assertEqual(data["time"], 0)
self.assertEqual(r.stopped, True)
def test_env_dict(self):
r = self.create_runner()
env = {"name": "value", "other": "value"}
new_env = r.env_to_dict(env)
self.assertEqual(env, new_env)
r.env_to_dict([("name", "value"), ("other", "value")])
self.assertEqual(env, new_env)
new_env = r.env_to_dict(("name", "value"))
self.assertEqual({}, new_env)
env["another"] = "BUILD_ROOT/foo"
test_env = env.copy()
r.clean_env(test_env)
self.assertEqual(test_env["another"], "%s/foo" % self.build_root)
test_env = env.copy()
del os.environ["BUILD_ROOT"]
r.clean_env(test_env)
self.assertEqual(test_env["another"], "%s/foo" % os.getcwd())
def test_max_step_time(self):
with JobRunner.temp_file() as script:
script.write(b"sleep 30")
script.close()
with open(os.devnull, "wb") as devnull:
r = self.create_runner()
r.max_step_time = 2
proc = r.create_process(script.name, {}, devnull)
out = r.read_process_output(proc, r.job_data["steps"][0], {})
self.assertIn("taking longer than the max", out["output"])
self.assertLess(out["time"], 10)
self.assertEqual(out["canceled"], True)
| apache-2.0 | -682,032,381,414,663,000 | 39.789474 | 126 | 0.566523 | false |
claws/AutobahnPython | autobahn/autobahn/websocket/http.py | 35 | 19607 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
##
## HTTP Status Codes
##
## Source: http://en.wikipedia.org/wiki/List_of_HTTP_status_codes
## Adapted on 2011/10/11
##
##
## 1xx Informational
##
## Request received, continuing process.
##
## This class of status code indicates a provisional response, consisting only of
## the Status-Line and optional headers, and is terminated by an empty line.
## Since HTTP/1.0 did not define any 1xx status codes, servers must not send
## a 1xx response to an HTTP/1.0 client except under experimental conditions.
##
CONTINUE = (100, "Continue",
"This means that the server has received the request headers, and that the client should proceed to send the request body (in the case of a request for which a body needs to be sent; for example, a POST request). If the request body is large, sending it to a server when a request has already been rejected based upon inappropriate headers is inefficient. To have a server check if the request could be accepted based on the request's headers alone, a client must send Expect: 100-continue as a header in its initial request[2] and check if a 100 Continue status code is received in response before continuing (or receive 417 Expectation Failed and not continue).")
SWITCHING_PROTOCOLS = (101, "Switching Protocols",
"This means the requester has asked the server to switch protocols and the server is acknowledging that it will do so.")
PROCESSING = (102, "Processing (WebDAV) (RFC 2518)",
"As a WebDAV request may contain many sub-requests involving file operations, it may take a long time to complete the request. This code indicates that the server has received and is processing the request, but no response is available yet.[3] This prevents the client from timing out and assuming the request was lost.")
CHECKPOINT = (103, "Checkpoint",
"This code is used in the Resumable HTTP Requests Proposal to resume aborted PUT or POST requests.")
REQUEST_URI_TOO_LONG = (122, "Request-URI too long",
"This is a non-standard IE7-only code which means the URI is longer than a maximum of 2083 characters.[5][6] (See code 414.)")
##
## 2xx Success
##
## This class of status codes indicates the action requested by the client was
## received, understood, accepted and processed successfully.
##
OK = (200, "OK",
"Standard response for successful HTTP requests. The actual response will depend on the request method used. In a GET request, the response will contain an entity corresponding to the requested resource. In a POST request the response will contain an entity describing or containing the result of the action.")
CREATED = (201, "Created",
"The request has been fulfilled and resulted in a new resource being created.")
ACCEPTED = (202, "Accepted",
"The request has been accepted for processing, but the processing has not been completed. The request might or might not eventually be acted upon, as it might be disallowed when processing actually takes place.")
NON_AUTHORATIVE = (203, "Non-Authoritative Information (since HTTP/1.1)",
"The server successfully processed the request, but is returning information that may be from another source.")
NO_CONTENT = (204, "No Content",
"The server successfully processed the request, but is not returning any content.")
RESET_CONTENT = (205, "Reset Content",
"The server successfully processed the request, but is not returning any content. Unlike a 204 response, this response requires that the requester reset the document view.")
PARTIAL_CONTENT = (206, "Partial Content",
"The server is delivering only part of the resource due to a range header sent by the client. The range header is used by tools like wget to enable resuming of interrupted downloads, or split a download into multiple simultaneous streams.")
MULTI_STATUS = (207, "Multi-Status (WebDAV) (RFC 4918)",
"The message body that follows is an XML message and can contain a number of separate response codes, depending on how many sub-requests were made.")
IM_USED = (226, "IM Used (RFC 3229)",
"The server has fulfilled a GET request for the resource, and the response is a representation of the result of one or more instance-manipulations applied to the current instance.")
##
## 3xx Redirection
##
## The client must take additional action to complete the request.
##
## This class of status code indicates that further action needs to be taken
## by the user agent in order to fulfil the request. The action required may
## be carried out by the user agent without interaction with the user if and
## only if the method used in the second request is GET or HEAD. A user agent
## should not automatically redirect a request more than five times, since such
## redirections usually indicate an infinite loop.
##
MULTIPLE_CHOICES = (300, "Multiple Choices",
"Indicates multiple options for the resource that the client may follow. It, for instance, could be used to present different format options for video, list files with different extensions, or word sense disambiguation.")
MOVED_PERMANENTLY = (301, "Moved Permanently",
"This and all future requests should be directed to the given URI.")
FOUND = (302, "Found",
"This is an example of industrial practice contradicting the standard. HTTP/1.0 specification (RFC 1945) required the client to perform a temporary redirect (the original describing phrase was 'Moved Temporarily', but popular browsers implemented 302 with the functionality of a 303 See Other. Therefore, HTTP/1.1 added status codes 303 and 307 to distinguish between the two behaviours. However, some Web applications and frameworks use the 302 status code as if it were the 303.")
SEE_OTHER = (303, "See Other (since HTTP/1.1)",
"The response to the request can be found under another URI using a GET method. When received in response to a POST (or PUT/DELETE), it should be assumed that the server has received the data and the redirect should be issued with a separate GET message.")
NOT_MODIFIED = (304, "Not Modified",
"Indicates the resource has not been modified since last requested.[2] Typically, the HTTP client provides a header like the If-Modified-Since header to provide a time against which to compare. Using this saves bandwidth and reprocessing on both the server and client, as only the header data must be sent and received in comparison to the entirety of the page being re-processed by the server, then sent again using more bandwidth of the server and client.")
USE_PROXY = (305, "Use Proxy (since HTTP/1.1)",
"Many HTTP clients (such as Mozilla[11] and Internet Explorer) do not correctly handle responses with this status code, primarily for security reasons.")
SWITCH_PROXY = (306, "Switch Proxy",
"No longer used. Originally meant 'Subsequent requests should use the specified proxy'.")
TEMPORARY_REDIRECT = (307, "Temporary Redirect (since HTTP/1.1)",
"In this occasion, the request should be repeated with another URI, but future requests can still use the original URI.[2] In contrast to 303, the request method should not be changed when reissuing the original request. For instance, a POST request must be repeated using another POST request.")
RESUME_INCOMPLETE = (308, "Resume Incomplete",
"This code is used in the Resumable HTTP Requests Proposal to resume aborted PUT or POST requests.")
##
## 4xx Client Error
##
## The 4xx class of status code is intended for cases in which the client
## seems to have erred. Except when responding to a HEAD request, the server
## should include an entity containing an explanation of the error situation,
## and whether it is a temporary or permanent condition. These status codes are
## applicable to any request method. User agents should display any included
## entity to the user. These are typically the most common error codes
## encountered while online.
##
BAD_REQUEST = (400, "Bad Request",
"The request cannot be fulfilled due to bad syntax.")
UNAUTHORIZED = (401, "Unauthorized",
"Similar to 403 Forbidden, but specifically for use when authentication is possible but has failed or not yet been provided.[2] The response must include a WWW-Authenticate header field containing a challenge applicable to the requested resource. See Basic access authentication and Digest access authentication.")
PAYMENT_REQUIRED = (402, "Payment Required",
"Reserved for future use.[2] The original intention was that this code might be used as part of some form of digital cash or micropayment scheme, but that has not happened, and this code is not usually used. As an example of its use, however, Apple's MobileMe service generates a 402 error if the MobileMe account is delinquent.")
FORBIDDEN = (403, "Forbidden",
"The request was a legal request, but the server is refusing to respond to it.[2] Unlike a 401 Unauthorized response, authenticating will make no difference.[2]")
NOT_FOUND = (404, "Not Found",
"The requested resource could not be found but may be available again in the future.[2] Subsequent requests by the client are permissible.")
METHOD_NOT_ALLOWED = (405, "Method Not Allowed",
"A request was made of a resource using a request method not supported by that resource;[2] for example, using GET on a form which requires data to be presented via POST, or using PUT on a read-only resource.")
NOT_ACCEPTABLE = (406, "Not Acceptable",
"The requested resource is only capable of generating content not acceptable according to the Accept headers sent in the request.")
PROXY_AUTH_REQUIRED = (407, "Proxy Authentication Required",
"The client must first authenticate itself with the proxy.")
REQUEST_TIMEOUT = (408, "Request Timeout",
"The server timed out waiting for the request. According to W3 HTTP specifications: 'The client did not produce a request within the time that the server was prepared to wait. The client MAY repeat the request without modifications at any later time.'")
CONFLICT = (409, "Conflict",
"Indicates that the request could not be processed because of conflict in the request, such as an edit conflict.")
GONE = (410, "Gone",
"Indicates that the resource requested is no longer available and will not be available again.[2] This should be used when a resource has been intentionally removed and the resource should be purged. Upon receiving a 410 status code, the client should not request the resource again in the future. Clients such as search engines should remove the resource from their indices. Most use cases do not require clients and search engines to purge the resource, and a '404 Not Found' may be used instead.")
LENGTH_REQUIRED = (411, "Length Required",
"The request did not specify the length of its content, which is required by the requested resource.")
PRECONDITION_FAILED = (412, "Precondition Failed",
"The server does not meet one of the preconditions that the requester put on the request.")
REQUEST_ENTITY_TOO_LARGE = (413, "Request Entity Too Large",
"The request is larger than the server is willing or able to process.")
REQUEST_URI_TOO_LARGE = (414, "Request-URI Too Long",
"The URI provided was too long for the server to process.")
UNSUPPORTED_MEDIA_TYPE = (415, "Unsupported Media Type",
"The request entity has a media type which the server or resource does not support. For example, the client uploads an image as image/svg+xml, but the server requires that images use a different format.")
INVALID_REQUEST_RANGE = (416, "Requested Range Not Satisfiable",
"The client has asked for a portion of the file, but the server cannot supply that portion.[2] For example, if the client asked for a part of the file that lies beyond the end of the file.")
EXPECTATION_FAILED = (417, "Expectation Failed",
"The server cannot meet the requirements of the Expect request-header field.")
TEAPOT = (418, "I'm a teapot (RFC 2324)",
"This code was defined in 1998 as one of the traditional IETF April Fools' jokes, in RFC 2324, Hyper Text Coffee Pot Control Protocol, and is not expected to be implemented by actual HTTP servers.")
UNPROCESSABLE_ENTITY = (422, "Unprocessable Entity (WebDAV) (RFC 4918)",
"The request was well-formed but was unable to be followed due to semantic errors.")
LOCKED = (423, "Locked (WebDAV) (RFC 4918)",
"The resource that is being accessed is locked.")
FAILED_DEPENDENCY = (424, "Failed Dependency (WebDAV) (RFC 4918)",
"The request failed due to failure of a previous request (e.g. a PROPPATCH).")
UNORDERED_COLLECTION = (425, "Unordered Collection (RFC 3648)",
"Defined in drafts of 'WebDAV Advanced Collections Protocol', but not present in 'Web Distributed Authoring and Versioning (WebDAV) Ordered Collections Protocol'.")
UPGRADE_REQUIRED = (426, "Upgrade Required (RFC 2817)",
"The client should switch to a different protocol such as TLS/1.0.")
NO_RESPONSE = (444, "No Response",
"A Nginx HTTP server extension. The server returns no information to the client and closes the connection (useful as a deterrent for malware).")
RETRY_WITH = (449, "Retry With",
"A Microsoft extension. The request should be retried after performing the appropriate action.")
PARANTAL_BLOCKED = (450, "Blocked by Windows Parental Controls",
"A Microsoft extension. This error is given when Windows Parental Controls are turned on and are blocking access to the given webpage.")
CLIENT_CLOSED_REQUEST = (499, "Client Closed Request",
"An Nginx HTTP server extension. This code is introduced to log the case when the connection is closed by client while HTTP server is processing its request, making server unable to send the HTTP header back.")
##
## 5xx Server Error
##
## The server failed to fulfill an apparently valid request.
##
## Response status codes beginning with the digit "5" indicate cases in which
## the server is aware that it has encountered an error or is otherwise incapable
## of performing the request. Except when responding to a HEAD request, the server
## should include an entity containing an explanation of the error situation, and
## indicate whether it is a temporary or permanent condition. Likewise, user agents
## should display any included entity to the user. These response codes are
## applicable to any request method.
##
INTERNAL_SERVER_ERROR = (500, "Internal Server Error",
"A generic error message, given when no more specific message is suitable.")
NOT_IMPLEMENTED = (501, "Not Implemented",
"The server either does not recognise the request method, or it lacks the ability to fulfill the request.")
BAD_GATEWAY = (502, "Bad Gateway",
"The server was acting as a gateway or proxy and received an invalid response from the upstream server.")
SERVICE_UNAVAILABLE = (503, "Service Unavailable",
"The server is currently unavailable (because it is overloaded or down for maintenance). Generally, this is a temporary state.")
GATEWAY_TIMEOUT = (504, "Gateway Timeout",
"The server was acting as a gateway or proxy and did not receive a timely response from the upstream server.")
UNSUPPORTED_HTTP_VERSION = (505, "HTTP Version Not Supported",
"The server does not support the HTTP protocol version used in the request.")
VARIANT_ALSO_NEGOTIATES = (506, "Variant Also Negotiates (RFC 2295)",
"Transparent content negotiation for the request results in a circular reference.")
INSUFFICIENT_STORAGE = (507, "Insufficient Storage (WebDAV)(RFC 4918)",
"The server is unable to store the representation needed to complete the request.")
BANDWIDTH_LIMIT_EXCEEDED = (509, "Bandwidth Limit Exceeded (Apache bw/limited extension)",
"This status code, while used by many servers, is not specified in any RFCs.")
NOT_EXTENDED = (510, "Not Extended (RFC 2774)",
"Further extensions to the request are required for the server to fulfill it.")
NETWORK_READ_TIMEOUT = (598, "Network read timeout error (Informal convention)",
"This status code is not specified in any RFCs, but is used by some HTTP proxies to signal a network read timeout behind the proxy to a client in front of the proxy.")
NETWORK_CONNECT_TIMEOUT = (599, "Network connect timeout error (Informal convention)",
"This status code is not specified in any RFCs, but is used by some HTTP proxies to signal a network connect timeout behind the proxy to a client in front of the proxy.")
class HttpException(Exception):
"""
Throw an instance of this class to deny a WebSocket connection
during handshake in :meth:`autobahn.websocket.protocol.WebSocketServerProtocol.onConnect`.
"""
def __init__(self, code, reason):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: str
"""
self.code = code
self.reason = reason
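# Illustrative sketch (not part of the original module): a server protocol can
# deny a handshake by raising HttpException from onConnect(), e.g.
#
#   class MyServerProtocol(WebSocketServerProtocol):
#       def onConnect(self, request):
#           if request.path != "/ws":   # hypothetical routing rule
#               raise HttpException(NOT_FOUND[0], NOT_FOUND[1])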
| apache-2.0 | -3,128,548,720,702,845,400 | 83.150215 | 693 | 0.668741 | false |
nthall/pip | pip/compat/dictconfig.py | 921 | 23096 | # This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import logging.handlers
import re
import sys
import types
from pip._vendor import six
# flake8: noqa
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
#
# This function is defined in logging only in recent versions of Python
#
try:
from logging import _checkLevel
except ImportError:
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in logging._levelNames:
raise ValueError('Unknown level: %r' % level)
rv = logging._levelNames[level]
else:
raise TypeError('Level not an integer or a '
'valid string: %r' % level)
return rv
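# For example, _checkLevel("DEBUG") and _checkLevel(10) both return 10; an
# unknown level name raises ValueError and any other type raises TypeError.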
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
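#
# Illustrative sketch of the conversion protocol described above (keys and
# values are hypothetical):
#
#   d = ConvertingDict({'stream': 'ext://sys.stderr',
#                       'level': 'cfg://handlers.console.level'})
#   d.configurator = configurator   # a BaseConfigurator instance
#   d['stream']   # -> the sys.stderr object, via ext_convert()
#   d['level']    # -> configurator.config['handlers']['console']['level'],
#                 #    via cfg_convert()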
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
# If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
# If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
# If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = __import__
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
# print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
# rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, six.string_types): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict((k, config[k]) for k in config if valid_ident(k))
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7)
if sys.version_info[:2] == (2, 7):
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
# we don't want to lose the existing loggers,
# since other threads may have pointers to them.
# existing is set to contain all existing loggers,
# and as we go through the new configuration we
# remove any which are configured. At the end,
# what's left in existing is the set of loggers
# which were in the previous configuration but
# which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict)
# The list needs to be sorted so that we can
# avoid disabling child loggers of explicitly
# named loggers. With a sorted list it is easier
# to find the child loggers.
existing.sort()
# We'll keep the list of existing loggers
# which are children of named loggers here...
child_loggers = []
# now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
# Disable any old loggers. There's no point deleting
# them as other threads may continue to hold references
# and by disabling them, you stop them doing any logging.
# However, don't disable children of named loggers, as that's
# probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
# Name of parameter changed from fmt to format.
# Retry with old name.
# This is so that code can be used with older Python versions
                # (e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
# Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
config['target'] = self.config['handlers'][config['target']]
except StandardError as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict((k, config[k]) for k in config if valid_ident(k))
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
# The argument name changed from strm to stream
# Retry with old name.
# This is so that code can be used with older Python versions
            # (e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(_checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
# Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
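# Minimal illustrative usage (a sketch; the schema mirrors the stdlib
# logging.config.dictConfig API):
#
#   dictConfig({
#       'version': 1,
#       'formatters': {'brief': {'format': '%(levelname)s: %(message)s'}},
#       'handlers': {'console': {'class': 'logging.StreamHandler',
#                                'formatter': 'brief'}},
#       'root': {'level': 'INFO', 'handlers': ['console']},
#   })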
| mit | 7,203,165,168,251,220,000 | 39.877876 | 105 | 0.525416 | false |
pombredanne/libcomps | libcomps/src/python/docs/doc-sources/conf.py | 2 | 8487 | # -*- coding: utf-8 -*-
#
# x documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 9 16:34:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import ctypes
clibcomps = ctypes.cdll.LoadLibrary("/home/jluza/libcomps/libcomps-build/src/libcomps.so.0.1.6")
os.environ['LD_LIBRARY_PATH'] = "%s" % "/home/jluza/libcomps/libcomps-build/src"
print os.environ['LD_LIBRARY_PATH']
sys.path.insert(0, os.path.abspath("/home/jluza/libcomps/libcomps-build/src/python/src/python2"))
import libcomps
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libcomps'
copyright = u'RedHat 2013'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.6'
# The full version, including alpha/beta/rc tags.
release = ("0." "1."
"6-" "9")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
autodoc_member_order = "groupwise"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'x.tex', u'x Documentation',
u'x', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'x', u'x Documentation',
[u'x'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'x', u'x Documentation',
u'x', 'x', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
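# autodoc hook: force documentation of module-level builtin functions (the
# callables exported by the C extension) and skip __init__ only when it is
# the inherited wrapper_descriptor (i.e. the default object.__init__).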
def skip(app, what, name, obj, skip, options):
if what == "module" and type(obj).__name__ == "builtin_function_or_method":
return False
if name == "__init__":
return type(obj).__name__ == "wrapper_descriptor"
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
# Example configuration for intersphinx: refer to the Python standard library.
| gpl-2.0 | 7,757,494,753,393,517,000 | 31.026415 | 121 | 0.699187 | false |
Health123/ansible | lib/ansible/utils/module_docs_fragments/cloudstack.py | 85 | 2161 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard cloudstack documentation fragment
DOCUMENTATION = '''
options:
api_key:
description:
- API key of the CloudStack API.
required: false
default: null
api_secret:
description:
- Secret key of the CloudStack API.
required: false
default: null
api_url:
description:
- URL of the CloudStack API e.g. https://cloud.example.com/client/api.
required: false
default: null
api_http_method:
description:
- HTTP method used.
required: false
default: 'get'
choices: [ 'get', 'post' ]
api_timeout:
description:
- HTTP timeout.
required: false
default: 10
requirements:
- "python >= 2.6"
- cs
notes:
- Ansible uses the C(cs) library's configuration method if credentials are not
provided by the options C(api_url), C(api_key), C(api_secret).
Configuration is read from several locations, in the following order.
    - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET),
      C(CLOUDSTACK_METHOD) and C(CLOUDSTACK_TIMEOUT) environment variables.
- A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
- A C(cloudstack.ini) file in the current working directory.
- A C(.cloudstack.ini) file in the users home directory.
See https://github.com/exoscale/cs for more information.
- This module supports check mode.
'''
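# A minimal cloudstack.ini consumed by the cs library might look like this
# (placeholder values, not real credentials):
#
#   [cloudstack]
#   endpoint = https://cloud.example.com/client/api
#   key = <your api key>
#   secret = <your api secret>
#   timeout = 10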
| gpl-3.0 | 1,994,771,917,855,923,200 | 32.230769 | 80 | 0.7 | false |
go-smart/glossia-quickstart | code/problem.py | 1 | 13906 | """This requires the CGAL mesher applied to a series of surfaces. See readme.txt for details.
"""
from __future__ import print_function
# Use FEniCS for Finite Element
import fenics as d
# Useful to import the derivative separately
from dolfin import dx
# Useful numerical libraries
import numpy as N
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as P
# General tools
import os
import subprocess
import shutil
# UFL
import ufl
# Set interactive plotting on
P.ion()
# Use a separate Python file to declare variables
import variables as v
import vtk_tools
input_mesh = "input"
class IREProblem:
"""class IREProblem()
This represents a Finite Element IRE problem using a similar algorithm to that of ULJ
"""
def __init__(self):
pass
def load(self):
# Convert mesh from MSH to Dolfin-XML
shutil.copyfile("input/%s.msh" % input_mesh, "%s.msh" % input_mesh)
destination_xml = "%s.xml" % input_mesh
subprocess.call(["dolfin-convert", "%s.msh" % input_mesh, destination_xml])
# Load mesh and boundaries
mesh = d.Mesh(destination_xml)
self.patches = d.MeshFunction("size_t", mesh, "%s_facet_region.xml" % input_mesh)
self.subdomains = d.MeshFunction("size_t", mesh, "%s_physical_region.xml" % input_mesh)
# Define differential over subdomains
self.dxs = d.dx[self.subdomains]
# Turn subdomains into a Numpy array
self.subdomains_array = N.asarray(self.subdomains.array(), dtype=N.int32)
# Create a map from subdomain indices to tissues
self.tissues_by_subdomain = {}
for i, t in v.tissues.items():
print(i, t)
for j in t["indices"]:
self.tissues_by_subdomain[j] = t
self.mesh = mesh
self.setup_fe()
self.prepare_increase_conductivity()
def load_patient_data(self):
indicators = {}
for subdomain in ("liver", "vessels", "tumour"):
values = N.empty((v.dim_height, v.dim_width, v.dim_depth), dtype='uintp')
for i in range(0, v.dim_depth):
slice = N.loadtxt(os.path.join(
v.patient_data_location,
"patient-%s.%d.txt" % (subdomain, i + 1))
)
values[:, :, i] = slice.astype('uintp')
indicators[subdomain] = values
self.indicators = indicators
def interpolate_to_patient_data(self, function, indicator):
        # Zeros (rather than uninitialised memory) so voxels outside the indicator mask hold a defined value
        values = N.zeros((v.dim_height, v.dim_width, v.dim_depth), dtype='float')
it = N.nditer(values, flags=['multi_index'])
u = N.empty((1,))
x = N.empty((3,))
delta = (v.delta_height, v.delta_width, v.delta_depth)
offset = (v.offset_x, v.offset_y, v.offset_z)
while not it.finished:
if indicator[it.multi_index] != 1:
it.iternext()
continue
x[0] = it.multi_index[1] * delta[1] - offset[0]
x[1] = it.multi_index[0] * delta[0] - offset[1]
x[2] = it.multi_index[2] * delta[2] - offset[2]
function.eval(u, x)
            # Write the sampled value into the current voxel only
            values[it.multi_index] = u[0]
it.iternext()
return values
def setup_fe(self):
# Define the relevant function spaces
V = d.FunctionSpace(self.mesh, "Lagrange", 1)
self.V = V
# DG0 is useful for defining piecewise constant functions
DV = d.FunctionSpace(self.mesh, "Discontinuous Lagrange", 0)
self.DV = DV
# Define test and trial functions for FE
self.z = d.TrialFunction(self.V)
self.w = d.TestFunction(self.V)
def per_tissue_constant(self, generator):
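        # Build a piecewise-constant (DG0) field whose value on each cell is
        # generator(subdomain_label); used for per-tissue material parameters.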
fefunction = d.Function(self.DV)
generated_values = dict((l, generator(l)) for l in N.unique(self.subdomains_array))
vector = N.vectorize(generated_values.get)
fefunction.vector()[:] = vector(self.subdomains_array)
return fefunction
def get_tumour_volume(self):
# Perhaps there is a prettier way, but integrate a unit function over the tumour tets
one = d.Function(self.V)
one.vector()[:] = 1
return sum(d.assemble(one * self.dxs(i)) for i in v.tissues["tumour"]["indices"])
def save_lesion(self):
final_filename = "results/%s-max_e%06d.vtu" % (input_mesh, self.max_e_count)
shutil.copyfile(final_filename, "../lesion_volume.vtu")
destination = "../lesion_surface.vtp"
vtk_tools.save_lesion(destination, final_filename, "max_E", (80, None))
print("Output file to %s?" % destination, os.path.exists(destination))
def solve(self):
# TODO: when FEniCS ported to Python3, this should be exist_ok
try:
os.makedirs('results')
except OSError:
pass
z, w = (self.z, self.w)
u0 = d.Constant(0.0)
# Define the linear and bilinear forms
L = u0 * w * dx
# Define useful functions
cond = d.Function(self.DV)
U = d.Function(self.V)
# Initialize the max_e vector, that will store the cumulative max e values
max_e = d.Function(self.V)
max_e.vector()[:] = 0.0
max_e.rename("max_E", "Maximum energy deposition by location")
max_e_file = d.File("results/%s-max_e.pvd" % input_mesh)
max_e_per_step = d.Function(self.V)
max_e_per_step_file = d.File("results/%s-max_e_per_step.pvd" % input_mesh)
self.es = {}
self.max_es = {}
fi = d.File("results/%s-cond.pvd" % input_mesh)
potential_file = d.File("results/%s-potential.pvd" % input_mesh)
# Loop through the voltages and electrode combinations
for i, (anode, cathode, voltage) in enumerate(v.electrode_triples):
print("Electrodes %d (%lf) -> %d (0)" % (anode, voltage, cathode))
cond = d.project(self.sigma_start, V=self.DV)
# Define the Dirichlet boundary conditions on the active needles
uV = d.Constant(voltage)
term1_bc = d.DirichletBC(self.V, uV, self.patches, v.needles[anode])
term2_bc = d.DirichletBC(self.V, u0, self.patches, v.needles[cathode])
e = d.Function(self.V)
e.vector()[:] = max_e.vector()
# Re-evaluate conductivity
self.increase_conductivity(cond, e)
for j in range(v.max_restarts):
# Update the bilinear form
a = d.inner(d.nabla_grad(z), cond * d.nabla_grad(w)) * dx
# Solve again
print(" [solving...")
d.solve(a == L, U, bcs=[term1_bc, term2_bc])
print(" ....solved]")
                # Guard against NaNs from a failed solve, then extract the electric field norm
for k in range(len(U.vector())):
if N.isnan(U.vector()[k]):
U.vector()[k] = 1e5
e_new = d.project(d.sqrt(d.dot(d.grad(U), d.grad(U))), self.V)
# Take the max of the new field and the established electric field
e.vector()[:] = N.array([max(*X) for X in zip(e.vector(), e_new.vector())])
# Re-evaluate conductivity
fi << cond
self.increase_conductivity(cond, e)
potential_file << U
# Save the max e function to a VTU
max_e_per_step.vector()[:] = e.vector()[:]
max_e_per_step_file << max_e_per_step
# Store this electric field norm, for this triple, for later reference
self.es[i] = e
# Store the max of this electric field norm and that for all previous triples
max_e_array = N.array([max(*X) for X in zip(max_e.vector(), e.vector())])
max_e.vector()[:] = max_e_array
# Create a new max_e function for storage, or it will be overwritten by the next iteration
max_e_new = d.Function(self.V)
max_e_new.vector()[:] = max_e_array
# Store this max e function for the cumulative coverage curve calculation later
self.max_es[i] = max_e_new
# Save the max e function to a VTU
max_e_file << max_e
self.max_e_count = i
def prepare_increase_conductivity(self):
def sigma_function(l, i):
s = self.tissues_by_subdomain[l]["sigma"]
if isinstance(s, list):
return s[i]
else:
return s
def threshold_function(l, i):
s = self.tissues_by_subdomain[l]["sigma"]
if isinstance(s, list):
return self.tissues_by_subdomain[l][i]
else:
return 1 if i == "threshold reversible" else 0
self.sigma_start = self.per_tissue_constant(lambda l: sigma_function(l, 0))
self.sigma_end = self.per_tissue_constant(lambda l: sigma_function(l, 1))
self.threshold_reversible = self.per_tissue_constant(lambda l: threshold_function(l, "threshold reversible"))
self.threshold_irreversible = self.per_tissue_constant(lambda l: threshold_function(l, "threshold irreversible"))
self.k = (self.sigma_end - self.sigma_start) / (self.threshold_irreversible - self.threshold_reversible)
self.h = self.sigma_start - self.k * self.threshold_reversible
def increase_conductivity(self, cond, e):
# Set up the three way choice function
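        # Piecewise-linear conductivity model encoded by the conditionals below:
        #   sigma(e) = sigma_start   for e < threshold_reversible
        #   sigma(e) = k*e + h       for threshold_reversible <= e <= threshold_irreversible
        #   sigma(e) = sigma_end     for e > threshold_irreversible
        # where k and h are precomputed in prepare_increase_conductivity().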
intermediate = e * self.k + self.h
not_less_than = ufl.conditional(ufl.gt(e, self.threshold_irreversible), self.sigma_end, intermediate)
cond_expression = ufl.conditional(ufl.lt(e, self.threshold_reversible), self.sigma_start, not_less_than)
# Project this onto the function space
cond_function = d.project(ufl.Max(cond_expression, cond), cond.function_space())
cond.assign(cond_function)
def plot_bitmap_result(self):
# Create a horizontal axis
cc_haxis = N.linspace(5000, 1e5, 200)
# Import the binary data indicating the location of structures
self.load_patient_data()
# Calculate the tumour volume; this is what we will compare against
tumour_volume = (self.indicators["tumour"] == 1).sum()
# Initialize the output_arrays vector a rescale the x to V/cm
output_arrays = [cc_haxis / 100]
# Loop through the electrode triples
for i, triple in enumerate(v.electrode_triples):
# Project the max e values for this triple to DG0 - this forces an evaluation of the function at the mid-point of each tet, DG0's only DOF
e_dg = self.interpolate_to_patient_data(self.max_es[i], self.indicators["tumour"])
# Sum the tet volumes for tets with a midpoint value greater than x, looping over x as e-norm thresholds (also scale to tumour volume)
            elim = N.vectorize(lambda x: (e_dg > x).sum() / float(tumour_volume))
output_arrays.append(elim(cc_haxis))
# Compile into a convenient array
        output = N.array(list(zip(*output_arrays)))
# Output cumulative coverage curves as CSV
N.savetxt('results/%s-coverage_curves_bitmap.csv' % input_mesh, output)
# Plot the coverage curves
for (anode, cathode, voltage), a in zip(v.electrode_triples, output_arrays[1:]):
P.plot(output_arrays[0], a, label="%d - %d" % (anode, cathode))
# Draw the plot
P.draw()
P.title(r"Bitmap-based")
P.xlabel(r"Threshold level of $|E|$ ($\mathrm{J}$)")
P.ylabel(r"Fraction of tumour beneath level")
# Show a legend for the plot
P.legend(loc=3)
# Display the plot
P.show(block=True)
def plot_result(self):
# Calculate preliminary relationships
dofmap = self.DV.dofmap()
cell_dofs = N.array([dofmap.cell_dofs(c)[0] for c in N.arange(self.mesh.num_cells()) if (self.subdomains[c] in v.tissues["tumour"]["indices"])])
volumes = N.array([d.Cell(self.mesh, c).volume() for c in N.arange(self.mesh.num_cells()) if (self.subdomains[c] in v.tissues["tumour"]["indices"])])
# Create a horizontal axis
cc_haxis = N.linspace(5000, 1e5, 200)
# Calculate the tumour volume; this is what we will compare against
tumour_volume = self.get_tumour_volume()
        # Initialize the output_arrays vector and rescale the x-axis to V/cm
output_arrays = [cc_haxis / 100]
# Loop through the electrode pairs
for i, triple in enumerate(v.electrode_triples):
# Project the max e values for this triple to DG0 - this forces an evaluation of the function at the mid-point of each tet, DG0's only DOF
e_dg = d.project(self.max_es[i], self.DV)
# Calculate the "max e" contribution for each cell
contributor = N.vectorize(lambda c: e_dg.vector()[c])
contributions = contributor(cell_dofs)
# Sum the tet volumes for tets with a midpoint value greater than x, looping over x as e-norm thresholds (also scale to tumour volume)
elim = N.vectorize(lambda x: volumes[contributions > x].sum() / tumour_volume)
output_arrays.append(elim(cc_haxis))
# Compile into a convenient array
        output = N.array(list(zip(*output_arrays)))
# Output cumulative coverage curves as CSV
N.savetxt('results/%s-coverage_curves.csv' % input_mesh, output)
# Plot the coverage curves
for (anode, cathode, voltage), a in zip(v.electrode_triples, output_arrays[1:]):
P.plot(output_arrays[0], a, label="%d - %d" % (anode, cathode))
# Draw the plot
P.draw()
P.xlabel(r"Threshold level of $|E|$ ($\mathrm{J}$)")
P.ylabel(r"Fraction of tumour beneath level")
# Show a legend for the plot
P.legend(loc=3)
        # Save the plot to file
P.savefig('%s-coverage_curves' % input_mesh)
| mit | 5,167,436,385,352,867,000 | 37.30854 | 157 | 0.593629 | false |
restudToolbox/package | development/testing/_modules/auxiliary_reliability.py | 1 | 6085 | from statsmodels.tools.eval_measures import rmse
from copy import deepcopy
import numpy as np
import shlex
import os
from config import SPEC_DIR
import respy
def get_est_log_info():
""" Get the choice probabilities.
"""
with open('est.respy.info') as in_file:
for line in in_file.readlines():
# Split line
list_ = shlex.split(line)
# Skip empty lines
if len(list_) < 4:
continue
if list_[2] == 'Steps':
num_steps = int(list_[3])
if list_[2] == 'Evaluations':
num_evals = int(list_[3])
# Finishing
return num_evals, num_steps
def run(spec_dict, fname):
""" Run a version of the Monte Carlo exercise.
"""
dirname = fname.replace('.ini', '')
os.mkdir(dirname)
os.chdir(dirname)
# We first read in the first specification from the initial paper for our
# baseline and process the deviations.
respy_obj = respy.RespyCls(SPEC_DIR + fname)
respy_obj.unlock()
respy_obj.set_attr('file_est', '../truth/start/data.respy.dat')
for key_ in spec_dict.keys():
respy_obj.set_attr(key_, spec_dict[key_])
if respy_obj.attr['num_procs'] > 1:
respy_obj.set_attr('is_parallel', True)
else:
respy_obj.set_attr('is_parallel', False)
respy_obj.lock()
maxfun = respy_obj.get_attr('maxfun')
# Let us first simulate a baseline sample, store the results for future
# reference, and start an estimation from the true values.
os.mkdir('truth')
os.chdir('truth')
respy_obj.write_out()
simulate_specification(respy_obj, 'start', False)
x, _ = respy.estimate(respy_obj)
simulate_specification(respy_obj, 'stop', True, x)
rmse_start, rmse_stop = get_rmse()
num_evals, num_steps = get_est_log_info()
os.chdir('../')
record_results('Truth', rmse_start, rmse_stop, num_evals, num_steps, maxfun)
# Now we will estimate a misspecified model on this dataset assuming that
# agents are myopic. This will serve as a form of well behaved starting
# values for the real estimation to follow.
respy_obj.unlock()
respy_obj.set_attr('delta', 0.00)
respy_obj.lock()
os.mkdir('static')
os.chdir('static')
respy_obj.write_out()
simulate_specification(respy_obj, 'start', False)
x, _ = respy.estimate(respy_obj)
simulate_specification(respy_obj, 'stop', True, x)
rmse_start, rmse_stop = get_rmse()
num_evals, num_steps = get_est_log_info()
os.chdir('../')
record_results('Static', rmse_start, rmse_stop, num_evals, num_steps, maxfun)
    # Using the results from the misspecified model as starting values, we see
    # whether we can obtain the initial values.
respy_obj.update_model_paras(x)
respy_obj.unlock()
respy_obj.set_attr('delta', 0.95)
respy_obj.lock()
os.mkdir('dynamic')
os.chdir('dynamic')
respy_obj.write_out()
simulate_specification(respy_obj, 'start', False)
x, _ = respy.estimate(respy_obj)
simulate_specification(respy_obj, 'stop', True, x)
rmse_start, rmse_stop = get_rmse()
num_evals, num_steps = get_est_log_info()
os.chdir('../')
record_results('Dynamic', rmse_start, rmse_stop, num_evals, num_steps,
maxfun)
os.chdir('../')
def get_choice_probabilities(fname, is_flatten=True):
""" Get the choice probabilities.
"""
# Initialize container.
stats = np.tile(np.nan, (0, 4))
with open(fname) as in_file:
for line in in_file.readlines():
# Split line
list_ = shlex.split(line)
# Skip empty lines
if not list_:
continue
# If OUTCOMES is reached, then we are done for good.
if list_[0] == 'Outcomes':
break
# Any lines that do not have an integer as their first element
# are not of interest.
try:
int(list_[0])
except ValueError:
continue
# All lines that make it down here are relevant.
stats = np.vstack((stats, [float(x) for x in list_[1:]]))
# Return all statistics as a flattened array.
if is_flatten:
stats = stats.flatten()
# Finishing
return stats
def record_results(label, rmse_start, rmse_stop, num_evals, num_steps, maxfun):
with open('reliability.respy.info', 'a') as out_file:
# Setting up
if label == 'Truth':
out_file.write('\n RMSE\n\n')
fmt = '{:>15} {:>15} {:>15} {:>15} {:>15}\n\n'
out_file.write(fmt.format(*['Setup', 'Start', 'Stop', 'Evals', 'Steps']))
fmt = '{:>15} {:15.10f} {:15.10f} {:15} {:15}\n'
out_file.write(fmt.format(*[label, rmse_start, rmse_stop, num_evals, num_steps]))
# Add information on maximum allowed evaluations
if label == 'Dynamic':
fmt = '\n{:>15} {:<15} {:15}\n'
out_file.write(fmt.format(*['Maximum', 'Evaluations', maxfun]))
def get_rmse():
""" Compute the RMSE based on the relevant parameterization.
"""
fname = '../truth/start/data.respy.info'
probs_true = get_choice_probabilities(fname, is_flatten=True)
fname = 'start/data.respy.info'
probs_start = get_choice_probabilities(fname, is_flatten=True)
fname = 'stop/data.respy.info'
probs_stop = get_choice_probabilities(fname, is_flatten=True)
rmse_stop = rmse(probs_stop, probs_true)
rmse_start = rmse(probs_start, probs_true)
return rmse_start, rmse_stop
def simulate_specification(respy_obj, subdir, update, paras=None):
""" Simulate results to assess the estimation performance. Note that we do
not update the object that is passed in.
"""
os.mkdir(subdir)
os.chdir(subdir)
respy_copy = deepcopy(respy_obj)
if update:
assert (paras is not None)
respy_copy.update_model_paras(paras)
respy_copy.write_out()
respy.simulate(respy_copy)
os.chdir('../')
| mit | -5,061,372,821,688,122,000 | 26.912844 | 89 | 0.601972 | false |
yesho/MITMf | core/proxyplugins.py | 13 | 4292 | # Copyright (c) 2010-2011 Ben Schmidt, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import sys
import logging
import inspect
import traceback
from core.logger import logger
formatter = logging.Formatter("%(asctime)s [ProxyPlugins] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("ProxyPlugins", formatter)
class ProxyPlugins:
'''
This class does some magic so that all we need to do in
ServerConnection is do a self.plugins.hook() call
and we will call any plugin that implements the function
that it came from with the args passed to the original
function.
To do this, we are probably abusing the inspect module,
and if it turns out to be too slow it can be changed. For
now, it's nice because it makes for very little code needed
to tie us in.
Sadly, propagating changes back to the function is not quite
as easy in all cases :-/ . Right now, changes to local function
vars still have to be set back in the function. This only happens
in handleResponse, but is still annoying.
'''
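    # Illustrative sketch (hypothetical call site, not part of the original
    # code): from inside a hooked function such as
    # ServerConnection.handleResponse(), dispatch would look like
    #
    #     args = ProxyPlugins().hook()
    #
    # hook() reads the caller's name and locals via the inspect module,
    # looks up plugin_mthds["handleResponse"] and calls each plugin's
    # response(**args).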
mthdDict = {"connectionMade" : "request",
"handleStatus" : "responsestatus",
"handleResponse" : "response",
"handleHeader" : "responseheaders",
"handleEndHeaders": "responseheaders"}
plugin_mthds = {}
plugin_list = []
all_plugins = []
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
def set_plugins(self, plugins):
'''Set the plugins in use'''
for p in plugins:
self.add_plugin(p)
log.debug("Loaded {} plugin/s".format(len(plugins)))
def add_plugin(self,p):
'''Load a plugin'''
self.plugin_list.append(p)
log.debug("Adding {} plugin".format(p.name))
for mthd,pmthd in self.mthdDict.iteritems():
try:
self.plugin_mthds[mthd].append(getattr(p,pmthd))
except KeyError:
self.plugin_mthds[mthd] = [getattr(p,pmthd)]
def remove_plugin(self,p):
'''Unload a plugin'''
self.plugin_list.remove(p)
log.debug("Removing {} plugin".format(p.name))
for mthd,pmthd in self.mthdDict.iteritems():
            self.plugin_mthds[mthd].remove(getattr(p, pmthd))
def hook(self):
'''Magic to hook various function calls in sslstrip'''
#gets the function name and args of our caller
frame = sys._getframe(1)
fname = frame.f_code.co_name
keys,_,_,values = inspect.getargvalues(frame)
#assumes that no one calls del on an arg :-/
args = {}
for key in keys:
args[key] = values[key]
#prevent self conflict
if (fname == "handleResponse") or (fname == "handleHeader") or (fname == "handleEndHeaders"):
args['request'] = args['self']
args['response'] = args['self'].client
else:
args['request'] = args['self']
del args['self']
log.debug("hooking {}()".format(fname))
#calls any plugin that has this hook
try:
if self.plugin_mthds:
for f in self.plugin_mthds[fname]:
a = f(**args)
if a != None: args = a
except Exception as e:
#This is needed because errors in hooked functions won't raise an Exception + Traceback (which can be infuriating)
log.error("Exception occurred in hooked function")
traceback.print_exc()
#pass our changes to the locals back down
return args
| gpl-3.0 | 351,447,927,571,027,600 | 34.471074 | 126 | 0.622088 | false |
kracwarlock/Lasagne | lasagne/tests/layers/test_conv.py | 9 | 18394 | import numpy as np
import pytest
import importlib
import theano
import lasagne
from lasagne.utils import floatX
def conv2d(input, kernel, border_mode):
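    # Reference implementation (for testing only): accumulate the full
    # convolution of `input` (batch, channels, h, w) with `kernel`
    # (n_filters, channels, kh, kw) by shifting kernel taps, then trim
    # the result according to `border_mode`.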
output = np.zeros((input.shape[0],
kernel.shape[0],
input.shape[2] + kernel.shape[2] - 1,
input.shape[3] + kernel.shape[3] - 1,
))
for i in range(kernel.shape[2]):
for j in range(kernel.shape[3]):
k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis]
output[:, :, i:i + input.shape[2],
j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2)
if border_mode == 'valid':
trim = (kernel.shape[2] - 1, kernel.shape[3] - 1)
output = output[:, :, trim[0]:-trim[0], trim[1]:-trim[1]]
elif border_mode == 'same':
shift_x = (kernel.shape[2] - 1) // 2
shift_y = (kernel.shape[3] - 1) // 2
output = output[:, :, shift_x:input.shape[2] + shift_x,
shift_y:input.shape[3] + shift_y]
return output
def conv2d_test_sets():
def _convert(input, kernel, output, kwargs):
return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]
for border_mode in ['valid', 'full', 'same']:
for stride in [1, 2, 3]:
input = np.random.random((3, 1, 16, 23))
kernel = np.random.random((16, 1, 3, 3))
output = conv2d(input, kernel, border_mode=border_mode)
output = output[:, :, ::stride, ::stride]
yield _convert(input, kernel, output, {'border_mode': border_mode,
'stride': stride
})
input = np.random.random((3, 3, 16, 23))
kernel = np.random.random((16, 3, 3, 3))
output = conv2d(input, kernel, border_mode=border_mode)
output = output[:, :, ::stride, ::stride]
yield _convert(input, kernel, output, {'border_mode': border_mode,
'stride': stride
})
# bias-less case
input = np.random.random((3, 1, 16, 23))
kernel = np.random.random((16, 1, 3, 3))
output = conv2d(input, kernel, border_mode='valid')
yield _convert(input, kernel, output, {'b': None})
def conv1d(input, kernel, border_mode='valid'):
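    # Reference implementation using np.convolve per (batch, filter) pair;
    # note it assumes a single input channel (index 0 is hard-coded).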
output = []
for b in input:
temp = []
for c in kernel:
temp.append(
np.convolve(b[0, :], c[0, :], mode=border_mode))
output.append(temp)
return np.array(output)
def conv1d_test_sets():
def _convert(input, kernel, output, kwargs):
return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]
for border_mode in ['valid', 'full', 'same']:
for stride in [1, 2, 3]:
input = np.random.random((3, 1, 23))
kernel = np.random.random((16, 1, 3))
output = conv1d(input, kernel, border_mode)
output = output[:, :, ::stride]
yield _convert(input, kernel, output, {'border_mode': border_mode,
'stride': stride,
})
# bias-less case
input = np.random.random((3, 1, 23))
kernel = np.random.random((16, 1, 3))
output = conv1d(input, kernel, border_mode='valid')
yield _convert(input, kernel, output, {'b': None})
def test_conv_output_length():
from lasagne.layers.conv import conv_output_length
assert conv_output_length(13, 5, 3, 'valid', 2) == 3
assert conv_output_length(13, 5, 3, 'full', 2) == 6
assert conv_output_length(13, 5, 3, 'same', 2) == 5
assert conv_output_length(13, 5, 3, 'pad', 2) == 5
with pytest.raises(ValueError) as exc:
conv_output_length(13, 5, 3, '_nonexistent_mode', 2)
assert "Invalid border mode" in exc.value.args[0]
@pytest.fixture
def DummyInputLayer():
def factory(shape):
from lasagne.layers.input import InputLayer
return InputLayer(shape)
return factory
class TestConv1DLayer:
@pytest.mark.parametrize(
"input, kernel, output, kwargs", list(conv1d_test_sets()))
@pytest.mark.parametrize("extra_kwargs", [
{},
{'untie_biases': True},
])
def test_defaults(self, DummyInputLayer,
input, kernel, output, kwargs, extra_kwargs):
kwargs.update(extra_kwargs)
b, c, w = input.shape.eval()
input_layer = DummyInputLayer((b, c, w))
try:
from lasagne.layers.conv import Conv1DLayer
layer = Conv1DLayer(
input_layer,
num_filters=kernel.shape[0],
filter_size=kernel.shape[2],
W=kernel,
**kwargs
)
actual = layer.get_output_for(input).eval()
assert actual.shape == output.shape
assert actual.shape == layer.output_shape
assert np.allclose(actual, output)
except NotImplementedError:
pass
def test_init_none_nonlinearity_bias(self, DummyInputLayer):
from lasagne.layers.conv import Conv1DLayer
input_layer = DummyInputLayer((1, 2, 3))
layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),
nonlinearity=None, b=None)
assert layer.nonlinearity == lasagne.nonlinearities.identity
assert layer.b is None
def test_invalid_border_mode(self, DummyInputLayer):
from lasagne.layers.conv import Conv1DLayer
input_layer = DummyInputLayer((1, 2, 3))
with pytest.raises(RuntimeError) as exc:
layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),
border_mode='_nonexistent_mode')
assert "Invalid border mode" in exc.value.args[0]
class TestConv2DLayerImplementations:
@pytest.fixture(
params=[
('lasagne.layers', 'Conv2DLayer', {}),
('lasagne.layers.cuda_convnet',
'Conv2DCCLayer',
{'flip_filters': True}),
('lasagne.layers.corrmm', 'Conv2DMMLayer', {'flip_filters': True}),
('lasagne.layers.dnn', 'Conv2DDNNLayer', {'flip_filters': True}),
],
)
def Conv2DImpl(self, request):
impl_module_name, impl_name, impl_default_kwargs = request.param
try:
mod = importlib.import_module(impl_module_name)
except ImportError:
pytest.skip("{} not available".format(impl_module_name))
impl = getattr(mod, impl_name)
def wrapper(*args, **kwargs):
kwargs2 = impl_default_kwargs.copy()
kwargs2.update(kwargs)
return impl(*args, **kwargs2)
wrapper.__name__ = impl_name
return wrapper
@pytest.mark.parametrize(
"input, kernel, output, kwargs", list(conv2d_test_sets()))
@pytest.mark.parametrize("extra_kwargs", [
{},
{'untie_biases': True},
])
def test_defaults(self, Conv2DImpl, DummyInputLayer,
input, kernel, output, kwargs, extra_kwargs):
kwargs.update(extra_kwargs)
b, c, h, w = input.shape.eval()
input_layer = DummyInputLayer((b, c, h, w))
try:
layer = Conv2DImpl(
input_layer,
num_filters=kernel.shape[0],
filter_size=kernel.shape[2:],
W=kernel,
**kwargs
)
actual = layer.get_output_for(input).eval()
assert actual.shape == output.shape
assert actual.shape == layer.output_shape
assert np.allclose(actual, output)
except NotImplementedError:
pytest.skip()
@pytest.mark.parametrize(
"input, kernel, output, kwargs", list(conv2d_test_sets()))
def test_with_nones(self, Conv2DImpl, DummyInputLayer,
input, kernel, output, kwargs):
b, c, h, w = input.shape.eval()
input_layer = DummyInputLayer((None, c, None, None))
try:
layer = Conv2DImpl(
input_layer,
num_filters=kernel.shape[0],
filter_size=kernel.shape[2:],
W=kernel,
**kwargs
)
actual = layer.get_output_for(input).eval()
assert layer.output_shape == (None,
kernel.shape[0],
None,
None)
assert actual.shape == output.shape
assert np.allclose(actual, output)
except NotImplementedError:
pytest.skip()
def test_init_none_nonlinearity_bias(self, Conv2DImpl, DummyInputLayer):
input_layer = DummyInputLayer((1, 2, 3, 3))
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),
nonlinearity=None, b=None)
assert layer.nonlinearity == lasagne.nonlinearities.identity
assert layer.b is None
def test_invalid_border_mode(self, Conv2DImpl, DummyInputLayer):
input_layer = DummyInputLayer((1, 2, 3))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),
border_mode='_nonexistent_mode')
assert "Invalid border mode" in exc.value.args[0]
def test_get_params(self, Conv2DImpl, DummyInputLayer):
input_layer = DummyInputLayer((128, 3, 32, 32))
layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3))
assert layer.get_params() == [layer.W, layer.b]
assert layer.get_params(regularizable=False) == [layer.b]
assert layer.get_params(regularizable=True) == [layer.W]
assert layer.get_params(trainable=True) == [layer.W, layer.b]
assert layer.get_params(trainable=False) == []
assert layer.get_params(_nonexistent_tag=True) == []
assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]
class TestConv2DDNNLayer:
def test_import_without_gpu_or_cudnn_raises(self):
from theano.sandbox.cuda import dnn
if theano.config.device.startswith("gpu") and dnn.dnn_available():
pytest.skip()
else:
with pytest.raises(ImportError):
import lasagne.layers.dnn
def test_pad(self, DummyInputLayer):
try:
from lasagne.layers.dnn import Conv2DDNNLayer
except ImportError:
pytest.skip("dnn not available")
input_layer = DummyInputLayer((1, 2, 3, 3))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DDNNLayer(input_layer, num_filters=1,
filter_size=(3, 3), border_mode='valid',
pad=(1, 1))
assert ("You cannot specify both 'border_mode' and 'pad'" in
exc.value.args[0])
layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3),
pad=(3, 3))
assert layer.output_shape == (1, 4, 7, 7)
class TestConv2DMMLayer:
def test_import_without_gpu_raises(self):
if theano.config.device.startswith("gpu"):
pytest.skip()
else:
with pytest.raises(ImportError):
import lasagne.layers.corrmm
def test_pad(self, DummyInputLayer):
try:
from lasagne.layers.corrmm import Conv2DMMLayer
except ImportError:
pytest.skip("corrmm not available")
input_layer = DummyInputLayer((1, 2, 3, 3))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DMMLayer(input_layer, num_filters=1,
filter_size=(3, 3), border_mode='valid',
pad=(1, 1))
assert ("You cannot specify both 'border_mode' and 'pad'" in
exc.value.args[0])
layer = Conv2DMMLayer(input_layer, num_filters=4, filter_size=(3, 3),
pad=(3, 3))
assert layer.output_shape == (1, 4, 7, 7)
class TestConv2DCCLayer:
def test_import_without_gpu_raises(self):
if theano.config.device.startswith("gpu"):
pytest.skip()
else:
with pytest.raises(ImportError):
import lasagne.layers.cuda_convnet
def test_unsupported_settings(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = DummyInputLayer((128, 3, 32, 32))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 5))
assert ("Conv2DCCLayer only supports square filters" in
exc.value.args[0])
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3), stride=(1, 2))
assert ("Conv2DCCLayer only supports square strides" in
exc.value.args[0])
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=15,
filter_size=(3, 3))
assert ("Conv2DCCLayer requires num_filters to be a multiple of 16" in
exc.value.args[0])
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3), pad=(1, 2))
assert ("Conv2DCCLayer only supports square padding" in
exc.value.args[0])
input_layer = DummyInputLayer((128, 7, 32, 32))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3))
assert ("Conv2DCCLayer requires the number of input channels to be "
"1, 2, 3 or a multiple of 4" in exc.value.args[0])
def test_pad(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = DummyInputLayer((128, 3, 32, 32))
with pytest.raises(RuntimeError) as exc:
layer = Conv2DCCLayer(input_layer, num_filters=16,
filter_size=(3, 3), border_mode='valid',
pad=(1, 1))
assert ("You cannot specify both 'border_mode' and 'pad'" in
exc.value.args[0])
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
pad=(3, 3))
assert layer.output_shape == (128, 16, 36, 36)
def test_dimshuffle_false_shapes(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = DummyInputLayer((4, 32, 32, 128)) # c01b instead of bc01
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
dimshuffle=False)
assert layer.W.get_value().shape == (4, 3, 3, 16)
assert layer.b.get_value().shape == (16,)
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
dimshuffle=False, untie_biases=True)
assert layer.W.get_value().shape == (4, 3, 3, 16)
assert layer.b.get_value().shape == (16, 30, 30)
def test_dimshuffle_false_get_output_for(self, DummyInputLayer):
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
except ImportError:
pytest.skip("cuda_convnet not available")
# this implementation is tested against FilterActs instead of
# theano.tensor.nnet.conv.conv2d because using the latter leads to
# numerical precision errors.
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)
input = theano.shared(floatX(np.random.random((4, 5, 5, 8))))
kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))
input_layer = DummyInputLayer((4, 5, 5, 8)) # c01b instead of bc01
layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
dimshuffle=False, W=kernel, b=None,
nonlinearity=None)
output = np.array(filter_acts(input, kernel).eval())
actual = layer.get_output_for(input).eval()
actual = np.array(actual)
assert actual.shape == output.shape
assert actual.shape == layer.output_shape
assert np.allclose(actual, output)
class TestShuffleLayers:
def test_bc01_to_c01b(self):
from lasagne.layers.input import InputLayer
try:
from lasagne.layers.cuda_convnet import ShuffleBC01ToC01BLayer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = InputLayer((1, 2, 3, 4))
layer = ShuffleBC01ToC01BLayer(input_layer)
assert layer.output_shape == (2, 3, 4, 1)
input = floatX(np.random.random((1, 2, 3, 4)))
output = input.transpose(1, 2, 3, 0)
actual = layer.get_output_for(theano.shared(input)).eval()
assert np.allclose(output, actual)
def test_c01b_to_bc01(self):
from lasagne.layers.input import InputLayer
try:
from lasagne.layers.cuda_convnet import ShuffleC01BToBC01Layer
except ImportError:
pytest.skip("cuda_convnet not available")
input_layer = InputLayer((1, 2, 3, 4))
layer = ShuffleC01BToBC01Layer(input_layer)
assert layer.output_shape == (4, 1, 2, 3)
input = floatX(np.random.random((1, 2, 3, 4)))
output = input.transpose(3, 0, 1, 2)
actual = layer.get_output_for(theano.shared(input)).eval()
assert np.allclose(output, actual)
| mit | 4,293,136,424,335,137,000 | 38.219616 | 79 | 0.559204 | false |
UdjinM6/dash | test/functional/feature_help.py | 2 | 1909 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting dashd with -h works as expected."""
import subprocess
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def run_test(self):
self.log.info("Start dashd with -h for help text")
self.nodes[0].start(extra_args=['-h'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# Node should exit immediately and output help to stdout.
ret_code = self.nodes[0].process.wait(timeout=1)
assert_equal(ret_code, 0)
output = self.nodes[0].process.stdout.read()
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.nodes[0].running = False
self.log.info("Start dashd with -version for version information")
self.nodes[0].start(extra_args=['-version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# Node should exit immediately and output version to stdout.
ret_code = self.nodes[0].process.wait(timeout=1)
assert_equal(ret_code, 0)
output = self.nodes[0].process.stdout.read()
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
if __name__ == '__main__':
HelpTest().main()
| mit | -4,326,766,752,787,602,000 | 40.5 | 100 | 0.65846 | false |
richardcs/ansible | lib/ansible/utils/module_docs_fragments/dimensiondata_wait.py | 192 | 1429 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# - Adam Friedman <[email protected]>
class ModuleDocFragment(object):
# Dimension Data ("wait-for-completion" parameters) doc fragment
DOCUMENTATION = '''
options:
wait:
description:
      - Should we wait for the task to complete before moving on to the next.
required: false
default: false
wait_time:
description:
- The maximum amount of time (in seconds) to wait for the task to complete.
- Only applicable if I(wait=true).
required: false
default: 600
wait_poll_interval:
description:
- The amount of time (in seconds) to wait between checks for task completion.
- Only applicable if I(wait=true).
required: false
default: 2
'''
| gpl-3.0 | 6,233,215,318,716,246,000 | 30.755556 | 83 | 0.705388 | false |
Perferom/android_kernel_lge_msm7x27-3.0.x | scripts/build-all.py | 1250 | 9474 | #! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
import errno
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 | -6,376,592,524,960,908,000 | 34.350746 | 81 | 0.593519 | false |
level420/iconfont | generate.py | 1 | 4953 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2008 - 2012 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# This is a stub proxy for the real generator.py
##
import sys, os, re, subprocess, codecs, optparse
CMD_PYTHON = sys.executable
QOOXDOO_PATH = '../qooxdoo-master'
QX_PYLIB = "tool/pylib"
##
# A derived OptionParser class that ignores unknown options (The parent
# class raises in those cases, and stops further processing).
# We need this, as we are only interested in -c/--config on this level, and
# want to ignore pot. other options.
#
class IgnoringUnknownOptionParser(optparse.OptionParser):
##
# <rargs> is the raw argument list. The original _process_args mutates
# rargs, processing options into <values> and copying interspersed args
# into <largs>. This overridden version ignores unknown or ambiguous
# options.
def _process_args(self, largs, rargs, values):
while rargs:
try:
optparse.OptionParser._process_args(self, largs, rargs, values)
except (optparse.BadOptionError, optparse.AmbiguousOptionError):
pass
def parseArgs():
parser = IgnoringUnknownOptionParser(add_help_option=False)
parser.add_option(
"-c", "--config", dest="config", metavar="CFGFILE",
default="config.json", help="path to configuration file"
)
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true",
default=False, help="run in verbose mode"
)
(options, args) = parser.parse_args(sys.argv[1:])
return options, args
ShellOptions, ShellArgs = parseArgs()
# this is from misc.json, duplicated for decoupling
_eolComment = re.compile(r'(?<![a-zA-Z]:)//.*$', re.M) # double $ for string.Template
_mulComment = re.compile(r'/\*.*?\*/', re.S)
def stripComments(s):
b = _eolComment.sub('',s)
b = _mulComment.sub('',b)
return b
def getQxPath():
path = QOOXDOO_PATH
# OS env takes precedence
if os.environ.has_key("QOOXDOO_PATH"):
path = os.environ["QOOXDOO_PATH"]
# else use QOOXDOO_PATH from config.json
else:
config_file = ShellOptions.config
if os.path.exists(config_file):
# try json parsing with qx json
if not path.startswith('${'): # template macro has been resolved
sys.path.insert(0, os.path.join(path, QX_PYLIB))
try:
from misc import json
got_json = True
except:
got_json = False
got_path = False
if got_json:
config_str = codecs.open(config_file, "r", "utf-8").read()
#config_str = stripComments(config_str) # not necessary under demjson
config = json.loads(config_str)
p = config.get("let")
if p:
p = p.get("QOOXDOO_PATH")
if p:
path = p
got_path = True
# regex parsing - error prone
if not got_path:
qpathr=re.compile(r'"QOOXDOO_PATH"\s*:\s*"([^"]*)"\s*,?')
conffile = codecs.open(config_file, "r", "utf-8")
aconffile = conffile.readlines()
for line in aconffile:
mo = qpathr.search(line)
if mo:
path = mo.group(1)
break # assume first occurrence is ok
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), path))
return path
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # switch to skeleton dir
qxpath = getQxPath()
REAL_GENERATOR = os.path.join(qxpath, 'tool', 'bin', 'generator.py')
if not os.path.exists(REAL_GENERATOR):
print "Cannot find real generator script under: \"%s\"; aborting" % REAL_GENERATOR
sys.exit(1)
elif ShellOptions.verbose:
print "\nInvoking real generator under %s ..." % REAL_GENERATOR
argList = []
argList.append(CMD_PYTHON)
argList.append(REAL_GENERATOR)
argList.extend(sys.argv[1:])
if sys.platform == "win32":
argList1=[]
for arg in argList:
if arg.find(' ')>-1:
argList1.append('"%s"' % arg)
else:
argList1.append(arg)
argList = argList1
else:
argList = ['"%s"' % x for x in argList] # quote argv elements
cmd = " ".join(argList)
try:
retval = subprocess.call(cmd, shell=True)
except:
retval = 3
sys.exit(retval)
| mit | 8,421,166,476,534,527,000 | 31.801325 | 94 | 0.573794 | false |
pitch-sands/i-MPI | flask/Lib/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py | 17 | 15432 | # postgresql/psycopg2.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database via the psycopg2 driver.
Driver
------
The psycopg2 driver is available at http://pypi.python.org/pypi/psycopg2/ .
The dialect has several behaviors which are specifically tailored towards compatibility
with this module.
Note that psycopg1 is **not** supported.
Connecting
----------
URLs are of the form
``postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]``.
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* *server_side_cursors* - Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows are
not immediately pre-fetched and buffered after statement execution, but are
instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.base.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the ``stream_results=True`` execution option is a more targeted
way of enabling this mode on a per-execution basis.
* *use_native_unicode* - Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
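
For example, to enable server side cursors engine-wide (a minimal sketch;
the URL is a placeholder)::

    engine = create_engine(
        "postgresql+psycopg2://user:pass@host/dbname",
        server_side_cursors=True)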
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* isolation_level - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement or query).
This includes the options ``SERIALIZABLE``, ``READ COMMITTED``,
``READ UNCOMMITTED`` and ``REPEATABLE READ``.
* stream_results - Enable or disable usage of server side cursors.
If ``None`` or not set, the ``server_side_cursors`` option of the :class:`.Engine` is used.
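
For example, to stream a large result set without buffering it fully on the
client (a minimal sketch; ``engine`` and the table name are placeholders)::

    conn = engine.connect()
    result = conn.execution_options(stream_results=True).\
        execute("select * from big_table")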
Unicode
-------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
.. versionadded:: 0.7.3
The psycopg2-specific ``client_encoding`` parameter to :func:`.create_engine`.
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False``
to :func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
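
E.g., a minimal sketch with placeholder connection parameters::

    engine = create_engine(
        "postgresql+psycopg2://user:pass@host/dbname",
        use_native_unicode=False)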
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as more DBAPIs support unicode fully along with the approach of
Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation:
Transaction Isolation Level
---------------------------
The ``isolation_level`` parameter of :func:`.create_engine` here makes use
psycopg2's ``set_isolation_level()`` connection method, rather than
issuing a ``SET SESSION CHARACTERISTICS`` command. This because psycopg2
resets the isolation level on each new transaction, and needs to know
at the API level what level should be used.
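
E.g. (a minimal sketch; connection parameters are placeholders)::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="READ UNCOMMITTED")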
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
"""
import re
import logging
from sqlalchemy import util, exc
from sqlalchemy.util.compat import decimal
from sqlalchemy import processors
from sqlalchemy.engine import base
from sqlalchemy.sql import expression
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql.base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES
logger = logging.getLogger('sqlalchemy.dialects.postgresql')
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(decimal.Decimal)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
def __init__(self, *arg, **kw):
super(_PGEnum, self).__init__(*arg, **kw)
# Py2K
if self.convert_unicode:
self.convert_unicode = "force"
# end Py2K
class _PGArray(ARRAY):
def __init__(self, *arg, **kw):
super(_PGArray, self).__init__(*arg, **kw)
# Py2K
# FIXME: this check won't work for setups that
# have convert_unicode only on their create_engine().
if isinstance(self.item_type, sqltypes.String) and \
self.item_type.convert_unicode:
self.item_type.convert_unicode = "force"
# end Py2K
# When we're handed literal SQL, ensure it's a SELECT-query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
r'\s*SELECT',
re.I | re.UNICODE)
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_cursor(self):
# TODO: coverage for server side cursors + select.for_update()
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
(self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
or \
(
(not self.compiled or
isinstance(self.compiled.statement, expression._TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
)
)
else:
is_server_side = self.execution_options.get('stream_results', False)
self.__is_server_side = is_server_side
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
def get_result_proxy(self):
# TODO: ouch
if logger.isEnabledFor(logging.INFO):
self._log_notices(self.cursor)
if self.__is_server_side:
return base.BufferedRowResultProxy(self)
else:
return base.ResultProxy(self)
def _log_notices(self, cursor):
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
def visit_mod(self, binary, **kw):
return self.process(binary.left) + " %% " + self.process(binary.right)
def post_process_text(self, text):
return text.replace('%', '%%')
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace('%', '%%')
class PGDialect_psycopg2(PGDialect):
driver = 'psycopg2'
# Py2K
supports_unicode_statements = False
# end Py2K
default_paramstyle = 'pyformat'
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric : _PGNumeric,
ENUM : _PGEnum, # needs force_unicode
sqltypes.Enum : _PGEnum, # needs force_unicode
ARRAY : _PGArray, # needs force_unicode
}
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
client_encoding=None, **kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None)
@classmethod
def dbapi(cls):
psycopg = __import__('psycopg2')
return psycopg
@util.memoized_property
def _isolation_lookup(self):
extensions = __import__('psycopg2.extensions').extensions
return {
'READ COMMITTED':extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED':extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'REPEATABLE READ':extensions.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE':extensions.ISOLATION_LEVEL_SERIALIZABLE
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace('_', ' ')]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
extensions = __import__('psycopg2.extensions').extensions
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
# these error messages from libpq: interfaces/libpq/fe-misc.c.
# TODO: these are sent through gettext in libpq and we can't
# check within other locales - consider using connection.closed
return 'terminating connection' in str(e) or \
'closed the connection' in str(e) or \
'connection not open' in str(e) or \
'could not receive data from server' in str(e)
elif isinstance(e, self.dbapi.InterfaceError):
            # psycopg2 client errors, psycopg2/connection.h, psycopg2/cursor.h
return 'connection already closed' in str(e) or \
'cursor already closed' in str(e)
elif isinstance(e, self.dbapi.ProgrammingError):
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
return "losed the connection unexpectedly" in str(e)
else:
return False
dialect = PGDialect_psycopg2
| bsd-3-clause | 1,876,613,324,961,498,400 | 38.468031 | 107 | 0.645607 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KPixmapSequence.py | 1 | 1041 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KPixmapSequence(): # skipped bases: <class 'sip.wrapper'>
# no doc
def frameAt(self, *args, **kwargs): # real signature unknown
pass
def frameCount(self, *args, **kwargs): # real signature unknown
pass
def frameSize(self, *args, **kwargs): # real signature unknown
pass
def isEmpty(self, *args, **kwargs): # real signature unknown
pass
def isValid(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| gpl-2.0 | 6,659,048,858,140,943,000 | 26.394737 | 101 | 0.662824 | false |
vimagick/youtube-dl | youtube_dl/extractor/channel9.py | 124 | 11345 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class Channel9IE(InfoExtractor):
'''
Common extractor for channel9.msdn.com.
The type of provided URL (video or playlist) is determined according to
meta Search.PageType from web page HTML rather than URL itself, as it is
not always possible to do.
'''
IE_DESC = 'Channel 9'
IE_NAME = 'channel9'
_VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'
_TESTS = [
{
'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
'md5': 'bbd75296ba47916b754e73c3a4bbdf10',
'info_dict': {
'id': 'Events/TechEd/Australia/2013/KOS002',
'ext': 'mp4',
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
},
},
{
'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
'md5': 'b43ee4529d111bc37ba7ee4f34813e68',
'info_dict': {
'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing',
'ext': 'mp4',
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
'authors': ['Mike Wilmot'],
},
}
]
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
# Sorted by quality
_known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4']
def _restore_bytes(self, formatted_size):
if not formatted_size:
return 0
m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size)
if not m:
return 0
units = m.group('units')
try:
exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper())
except ValueError:
return 0
size = float(m.group('size'))
return int(size * (1024 ** exponent))
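    # Worked examples (sizes illustrative):
    #   '23.4 MB' -> int(23.4 * 1024 ** 2) == 24536678
    #   '1 GB'    -> 1024 ** 3 == 1073741824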
def _formats_from_html(self, html):
FORMAT_REGEX = r'''
(?x)
<a\s+href="(?P<url>[^"]+)">(?P<quality>[^<]+)</a>\s*
<span\s+class="usage">\((?P<note>[^\)]+)\)</span>\s*
(?:<div\s+class="popup\s+rounded">\s*
<h3>File\s+size</h3>\s*(?P<filesize>.*?)\s*
</div>)? # File size part may be missing
'''
# Extract known formats
formats = [{
'url': x.group('url'),
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
'preference': self._known_formats.index(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
} for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
self._sort_formats(formats)
return formats
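    # The markup FORMAT_REGEX expects, reconstructed from the pattern itself
    # (not copied from a live page):
    #
    #   <a href="http://video...mp4">High Quality MP4</a>
    #   <span class="usage">(high bitrate)</span>
    #   <div class="popup rounded"><h3>File size</h3>123.4 MB</div>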
def _extract_title(self, html):
title = self._html_search_meta('title', html, 'title')
if title is None:
title = self._og_search_title(html)
TITLE_SUFFIX = ' (Channel 9)'
if title is not None and title.endswith(TITLE_SUFFIX):
title = title[:-len(TITLE_SUFFIX)]
return title
def _extract_description(self, html):
DESCRIPTION_REGEX = r'''(?sx)
<div\s+class="entry-content">\s*
<div\s+id="entry-body">\s*
(?P<description>.+?)\s*
</div>\s*
</div>
'''
m = re.search(DESCRIPTION_REGEX, html)
if m is not None:
return m.group('description')
return self._html_search_meta('description', html, 'description')
def _extract_duration(self, html):
m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
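    # e.g. a page containing '"length": "01:16:16"' yields
    # 1 * 3600 + 16 * 60 + 16 = 4576 seconds (the duration asserted in the
    # first test above).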
def _extract_slides(self, html):
m = re.search(r'<a href="(?P<slidesurl>[^"]+)" class="slides">Slides</a>', html)
return m.group('slidesurl') if m is not None else None
def _extract_zip(self, html):
m = re.search(r'<a href="(?P<zipurl>[^"]+)" class="zip">Zip</a>', html)
return m.group('zipurl') if m is not None else None
def _extract_avg_rating(self, html):
m = re.search(r'<p class="avg-rating">Avg Rating: <span>(?P<avgrating>[^<]+)</span></p>', html)
return float(m.group('avgrating')) if m is not None else 0
def _extract_rating_count(self, html):
m = re.search(r'<div class="rating-count">\((?P<ratingcount>[^<]+)\)</div>', html)
return int(self._fix_count(m.group('ratingcount'))) if m is not None else 0
def _extract_view_count(self, html):
m = re.search(r'<li class="views">\s*<span class="count">(?P<viewcount>[^<]+)</span> Views\s*</li>', html)
return int(self._fix_count(m.group('viewcount'))) if m is not None else 0
def _extract_comment_count(self, html):
m = re.search(r'<li class="comments">\s*<a href="#comments">\s*<span class="count">(?P<commentcount>[^<]+)</span> Comments\s*</a>\s*</li>', html)
return int(self._fix_count(m.group('commentcount'))) if m is not None else 0
def _fix_count(self, count):
return int(str(count).replace(',', '')) if count is not None else None
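    # e.g. self._fix_count('5,234') -> 5234 (value illustrative; commas are
    # thousands separators in the page counters).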
def _extract_authors(self, html):
m = re.search(r'(?s)<li class="author">(.*?)</li>', html)
if m is None:
return None
return re.findall(r'<a href="/Niners/[^"]+">([^<]+)</a>', m.group(1))
def _extract_session_code(self, html):
m = re.search(r'<li class="code">\s*(?P<code>.+?)\s*</li>', html)
return m.group('code') if m is not None else None
def _extract_session_day(self, html):
m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html)
return m.group('day') if m is not None else None
def _extract_session_room(self, html):
m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html)
return m.group('room') if m is not None else None
def _extract_session_speakers(self, html):
return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
def _extract_content(self, html, content_path):
# Look for downloadable content
formats = self._formats_from_html(html)
slides = self._extract_slides(html)
zip_ = self._extract_zip(html)
# Nothing to download
if len(formats) == 0 and slides is None and zip_ is None:
self._downloader.report_warning('None of recording, slides or zip are available for %s' % content_path)
return
# Extract meta
title = self._extract_title(html)
description = self._extract_description(html)
thumbnail = self._og_search_thumbnail(html)
duration = self._extract_duration(html)
avg_rating = self._extract_avg_rating(html)
rating_count = self._extract_rating_count(html)
view_count = self._extract_view_count(html)
comment_count = self._extract_comment_count(html)
common = {
'_type': 'video',
'id': content_path,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'avg_rating': avg_rating,
'rating_count': rating_count,
'view_count': view_count,
'comment_count': comment_count,
}
result = []
if slides is not None:
d = common.copy()
d.update({'title': title + '-Slides', 'url': slides})
result.append(d)
if zip_ is not None:
d = common.copy()
d.update({'title': title + '-Zip', 'url': zip_})
result.append(d)
if len(formats) > 0:
d = common.copy()
d.update({'title': title, 'formats': formats})
result.append(d)
return result
def _extract_entry_item(self, html, content_path):
contents = self._extract_content(html, content_path)
if contents is None:
return contents
authors = self._extract_authors(html)
for content in contents:
content['authors'] = authors
return contents
def _extract_session(self, html, content_path):
contents = self._extract_content(html, content_path)
if contents is None:
return contents
session_meta = {
'session_code': self._extract_session_code(html),
'session_day': self._extract_session_day(html),
'session_room': self._extract_session_room(html),
'session_speakers': self._extract_session_speakers(html),
}
for content in contents:
content.update(session_meta)
return self.playlist_result(contents)
def _extract_list(self, content_path):
rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS')
entries = [self.url_result(session_url.text, 'Channel9')
for session_url in rss.findall('./channel/item/link')]
title_text = rss.find('./channel/title').text
return self.playlist_result(entries, content_path, title_text)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
content_path = mobj.group('contentpath')
webpage = self._download_webpage(url, content_path, 'Downloading web page')
page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
if page_type_m is not None:
page_type = page_type_m.group('pagetype')
if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
elif page_type == 'Event':
return self._extract_list(content_path)
else:
raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
else: # Assuming list
return self._extract_list(content_path)
| unlicense | -722,020,901,182,227,300 | 39.956679 | 153 | 0.554429 | false |
lizoyu/kaggle-DigitRecognizer | jupyter/resume_train.py | 1 | 1272 | from lib.data_utils import get_MNIST_data
from keras.models import load_model
from keras.backend import tf as ktf
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, EarlyStopping
# Read the MNIST data. We assume it lives at 'kaggle-DigitRecognizer/data/train.csv' and use a helper function to read it into a dictionary.
# By default this yields 41000 training samples, 1000 test samples and 1000 validation samples (drawn from the training set).
data = get_MNIST_data(fit=True)
# load the model (checkpoint)
tunemodel = load_model('../models/tuneResNet_early_04-0.0146.h5', custom_objects={'ktf': ktf})
# set the loss and optimizer
rmsprop = RMSprop(lr=0.0001)
tunemodel.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# fit the model
checkpoint = ModelCheckpoint('../models/tuneResNet_early_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=False)
earlystop = EarlyStopping(min_delta=0.001, patience=1)
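# With the filepath template above, each epoch writes a file such as
# '../models/tuneResNet_early_06-0.0132.h5' (epoch/loss values illustrative;
# compare the checkpoint loaded above).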
tunemodel.fit(data['X_train'], data['y_train'].reshape(-1, 1),
batch_size=16, epochs=10, validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
callbacks=[checkpoint, earlystop], initial_epoch=5) | gpl-3.0 | 9,074,484,426,298,482,000 | 49.92 | 149 | 0.709119 | false |
GoogleCloudPlatform/ml-pipeline-generator-python | examples/kfp/demo.py | 1 | 2531 | # python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo for KubeFlow Pipelines."""
import json
import os
from ml_pipeline_gen.models import TFModel
from ml_pipeline_gen.pipelines import KfpPipeline
from model.census_preprocess import load_data
def _upload_data_to_gcs(model):
"""Calls the preprocessing fn which uploads train/eval data to GCS."""
load_data(model.data["train"], model.data["evaluation"])
# TODO(humichael): See if there's a way to support csv batch predicts.
def _upload_input_data_to_gcs(model, data):
input_path = "tf_input_data.json"
with open(input_path, "w+") as f:
for features in data:
f.write(json.dumps(features) + "\n")
model.upload_pred_input_data(input_path)
os.remove(input_path)
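# The file written above is newline-delimited JSON -- one prediction record
# per line -- the layout the batch predict step is expected to consume here;
# see the pred_input example in main() below.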
# pylint: disable=g-import-not-at-top
def main():
config = "config.yaml"
model = TFModel(config)
model.generate_files()
_upload_data_to_gcs(model)
pipeline = KfpPipeline(model)
# preprocess and upload dataset to expected location.
load_data(model.data["train"], model.data["evaluation"])
# define pipeline structure
p = pipeline.add_train_component()
pipeline.add_deploy_component(parent=p)
pipeline.add_predict_component(parent=p)
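    # The three calls above build a fan-out DAG rooted at the training step:
    #
    #   train --+-- deploy
    #           +-- predict
    #
    # print_structure() below renders this graph for inspection.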
pipeline.print_structure()
pipeline.generate_pipeline()
# Create batch prediction data in GCS.
pred_input = [{
"age": 0.02599666,
"workclass": 6,
"education_num": 1.1365801,
"marital_status": 4,
"occupation": 0,
"relationship": 1,
"race": 4,
"capital_gain": 0.14693314,
"capital_loss": -0.21713187,
"hours_per_week": -0.034039237,
"native_country": 38,
"income_bracket": 0,
}]
_upload_input_data_to_gcs(model, pred_input)
# Run the pipeline.
# pylint: disable=import-outside-toplevel
from orchestration import pipeline as kfp_pipeline
kfp_pipeline.main()
if __name__ == "__main__":
main()
| apache-2.0 | -7,146,816,260,771,234,000 | 29.865854 | 74 | 0.674832 | false |
ernstp/kivy | kivy/tests/test_lang.py | 26 | 5715 | '''
Language tests
==============
'''
import unittest
from weakref import proxy
from functools import partial
class BaseClass(object):
uid = 0
# base class needed for builder
def __init__(self, **kwargs):
super(BaseClass, self).__init__()
self.proxy_ref = proxy(self)
self.children = []
self.parent = None
self.binded_func = {}
self.id = None
self.ids = {}
self.cls = []
self.ids = {}
self.uid = BaseClass.uid
BaseClass.uid += 1
def add_widget(self, widget):
self.children.append(widget)
widget.parent = self
def create_property(self, name, value=None):
pass
def is_event_type(self, key):
return key.startswith('on_')
def fbind(self, name, func, *largs):
self.binded_func[name] = partial(func, *largs)
return True
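    # e.g. fbind('on_press', func, widget) stores partial(func, widget) under
    # 'on_press'; the tests below fire it via wid.binded_func['on_press']().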
class TestClass(BaseClass):
obj = None
class TestClass2(BaseClass):
obj = None
class TestClass3(BaseClass):
obj = None
class LangTestCase(unittest.TestCase):
def import_builder(self):
from kivy.factory import Factory
from kivy.lang import BuilderBase
Builder = BuilderBase()
Factory.register('TestClass', cls=TestClass)
Factory.register('TestClass2', cls=TestClass2)
Factory.register('TestClass3', cls=TestClass3)
return Builder
def test_loading_failed_1(self):
# invalid indent
Builder = self.import_builder()
from kivy.lang import ParserException
try:
Builder.load_string('''#:kivy 1.0
<TestClass>:
''')
self.fail('Invalid indentation.')
except ParserException:
pass
def test_parser_numeric_1(self):
Builder = self.import_builder()
Builder.load_string('<TestClass>:\n\tobj: (.5, .5, .5)')
wid = TestClass()
Builder.apply(wid)
self.assertEqual(wid.obj, (0.5, 0.5, 0.5))
def test_parser_numeric_2(self):
Builder = self.import_builder()
Builder.load_string('<TestClass>:\n\tobj: (0.5, 0.5, 0.5)')
wid = TestClass()
Builder.apply(wid)
self.assertEqual(wid.obj, (0.5, 0.5, 0.5))
def test_references(self):
Builder = self.import_builder()
Builder.load_string('''
<TestClass>:
textinput: textinput
TestClass2:
id: textinput
''')
wid = TestClass()
Builder.apply(wid)
self.assertTrue(hasattr(wid, 'textinput'))
self.assertTrue(getattr(wid, 'textinput') is not None)
def test_references_with_template(self):
Builder = self.import_builder()
Builder.load_string('''
[Item@TestClass3]:
title: ctx.title
<TestClass>:
textinput: textinput
Item:
title: 'bleh'
TestClass2:
id: textinput
''')
wid = TestClass()
Builder.apply(wid)
self.assertTrue(hasattr(wid, 'textinput'))
self.assertTrue(getattr(wid, 'textinput') is not None)
def test_references_with_template_case_2(self):
Builder = self.import_builder()
Builder.load_string('''
[Item@TestClass3]:
title: ctx.title
<TestClass>:
textinput: textinput
TestClass2:
id: textinput
Item:
title: 'bleh'
''')
wid = TestClass()
Builder.apply(wid)
self.assertTrue(hasattr(wid, 'textinput'))
self.assertTrue(getattr(wid, 'textinput') is not None)
def test_references_with_template_case_3(self):
Builder = self.import_builder()
Builder.load_string('''
[Item@TestClass3]:
title: ctx.title
<TestClass>:
textinput: textinput
TestClass2:
Item:
title: 'bleh'
TestClass2:
TestClass2:
id: textinput
''')
wid = TestClass()
Builder.apply(wid)
self.assertTrue(hasattr(wid, 'textinput'))
self.assertTrue(getattr(wid, 'textinput') is not None)
def test_with_multiline(self):
Builder = self.import_builder()
Builder.load_string('''
<TestClass>:
on_press:
print('hello world')
print('this is working !')
self.a = 1
''')
wid = TestClass()
Builder.apply(wid)
wid.a = 0
self.assertTrue('on_press' in wid.binded_func)
wid.binded_func['on_press']()
self.assertEquals(wid.a, 1)
def test_with_eight_spaces(self):
Builder = self.import_builder()
Builder.load_string('''
<TestClass>:
        on_press:
                print('hello world')
                print('this is working !')
                self.a = 1
''')
wid = TestClass()
Builder.apply(wid)
wid.a = 0
self.assertTrue('on_press' in wid.binded_func)
wid.binded_func['on_press']()
self.assertEquals(wid.a, 1)
def test_with_one_space(self):
Builder = self.import_builder()
Builder.load_string('''
<TestClass>:
 on_press:
  print('hello world')
  print('this is working !')
  self.a = 1
''')
wid = TestClass()
Builder.apply(wid)
wid.a = 0
self.assertTrue('on_press' in wid.binded_func)
wid.binded_func['on_press']()
self.assertEquals(wid.a, 1)
def test_with_two_spaces(self):
Builder = self.import_builder()
Builder.load_string('''
<TestClass>:
  on_press:
    print('hello world')
    print('this is working !')
    self.a = 1
''')
wid = TestClass()
Builder.apply(wid)
wid.a = 0
self.assertTrue('on_press' in wid.binded_func)
wid.binded_func['on_press']()
self.assertEquals(wid.a, 1)
| mit | 8,849,077,230,448,503,000 | 24.065789 | 67 | 0.569029 | false |
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/tm/tmsamlssoprofile.py | 3 | 70968 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class tmsamlssoprofile(base_resource) :
""" Configuration for SAML sso action resource. """
def __init__(self) :
self._name = ""
self._samlsigningcertname = ""
self._assertionconsumerserviceurl = ""
self._relaystaterule = ""
self._sendpassword = ""
self._samlissuername = ""
self._signaturealg = ""
self._digestmethod = ""
self._audience = ""
self._nameidformat = ""
self._nameidexpr = ""
self._attribute1 = ""
self._attribute1expr = ""
self._attribute1friendlyname = ""
self._attribute1format = ""
self._attribute2 = ""
self._attribute2expr = ""
self._attribute2friendlyname = ""
self._attribute2format = ""
self._attribute3 = ""
self._attribute3expr = ""
self._attribute3friendlyname = ""
self._attribute3format = ""
self._attribute4 = ""
self._attribute4expr = ""
self._attribute4friendlyname = ""
self._attribute4format = ""
self._attribute5 = ""
self._attribute5expr = ""
self._attribute5friendlyname = ""
self._attribute5format = ""
self._attribute6 = ""
self._attribute6expr = ""
self._attribute6friendlyname = ""
self._attribute6format = ""
self._attribute7 = ""
self._attribute7expr = ""
self._attribute7friendlyname = ""
self._attribute7format = ""
self._attribute8 = ""
self._attribute8expr = ""
self._attribute8friendlyname = ""
self._attribute8format = ""
self._attribute9 = ""
self._attribute9expr = ""
self._attribute9friendlyname = ""
self._attribute9format = ""
self._attribute10 = ""
self._attribute10expr = ""
self._attribute10friendlyname = ""
self._attribute10format = ""
self._attribute11 = ""
self._attribute11expr = ""
self._attribute11friendlyname = ""
self._attribute11format = ""
self._attribute12 = ""
self._attribute12expr = ""
self._attribute12friendlyname = ""
self._attribute12format = ""
self._attribute13 = ""
self._attribute13expr = ""
self._attribute13friendlyname = ""
self._attribute13format = ""
self._attribute14 = ""
self._attribute14expr = ""
self._attribute14friendlyname = ""
self._attribute14format = ""
self._attribute15 = ""
self._attribute15expr = ""
self._attribute15friendlyname = ""
self._attribute15format = ""
self._attribute16 = ""
self._attribute16expr = ""
self._attribute16friendlyname = ""
self._attribute16format = ""
self.___count = 0
@property
def name(self) :
ur"""Name for the new saml single sign-on profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after an SSO action is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my action" or 'my action').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the new saml single sign-on profile. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after an SSO action is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my action" or 'my action').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def samlsigningcertname(self) :
ur"""Name of the signing authority as given in the SAML server's SSL certificate.<br/>Minimum length = 1.
"""
try :
return self._samlsigningcertname
except Exception as e:
raise e
@samlsigningcertname.setter
def samlsigningcertname(self, samlsigningcertname) :
ur"""Name of the signing authority as given in the SAML server's SSL certificate.<br/>Minimum length = 1
"""
try :
self._samlsigningcertname = samlsigningcertname
except Exception as e:
raise e
@property
def assertionconsumerserviceurl(self) :
ur"""URL to which the assertion is to be sent.<br/>Minimum length = 1.
"""
try :
return self._assertionconsumerserviceurl
except Exception as e:
raise e
@assertionconsumerserviceurl.setter
def assertionconsumerserviceurl(self, assertionconsumerserviceurl) :
ur"""URL to which the assertion is to be sent.<br/>Minimum length = 1
"""
try :
self._assertionconsumerserviceurl = assertionconsumerserviceurl
except Exception as e:
raise e
@property
def relaystaterule(self) :
ur"""Expression to extract relaystate to be sent along with assertion. Evaluation of this expression should return TEXT content. This is typically a targ
et url to which user is redirected after the recipient validates SAML token.
"""
try :
return self._relaystaterule
except Exception as e:
raise e
@relaystaterule.setter
def relaystaterule(self, relaystaterule) :
ur"""Expression to extract relaystate to be sent along with assertion. Evaluation of this expression should return TEXT content. This is typically a targ
et url to which user is redirected after the recipient validates SAML token.
"""
try :
self._relaystaterule = relaystaterule
except Exception as e:
raise e
@property
def sendpassword(self) :
ur"""Option to send password in assertion.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._sendpassword
except Exception as e:
raise e
@sendpassword.setter
def sendpassword(self, sendpassword) :
ur"""Option to send password in assertion.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._sendpassword = sendpassword
except Exception as e:
raise e
@property
def samlissuername(self) :
ur"""The name to be used in requests sent from Netscaler to IdP to uniquely identify Netscaler.<br/>Minimum length = 1.
"""
try :
return self._samlissuername
except Exception as e:
raise e
@samlissuername.setter
def samlissuername(self, samlissuername) :
ur"""The name to be used in requests sent from Netscaler to IdP to uniquely identify Netscaler.<br/>Minimum length = 1
"""
try :
self._samlissuername = samlissuername
except Exception as e:
raise e
@property
def signaturealg(self) :
ur"""Algorithm to be used to sign/verify SAML transactions.<br/>Default value: RSA-SHA1<br/>Possible values = RSA-SHA1, RSA-SHA256.
"""
try :
return self._signaturealg
except Exception as e:
raise e
@signaturealg.setter
def signaturealg(self, signaturealg) :
ur"""Algorithm to be used to sign/verify SAML transactions.<br/>Default value: RSA-SHA1<br/>Possible values = RSA-SHA1, RSA-SHA256
"""
try :
self._signaturealg = signaturealg
except Exception as e:
raise e
@property
def digestmethod(self) :
ur"""Algorithm to be used to compute/verify digest for SAML transactions.<br/>Default value: SHA1<br/>Possible values = SHA1, SHA256.
"""
try :
return self._digestmethod
except Exception as e:
raise e
@digestmethod.setter
def digestmethod(self, digestmethod) :
ur"""Algorithm to be used to compute/verify digest for SAML transactions.<br/>Default value: SHA1<br/>Possible values = SHA1, SHA256
"""
try :
self._digestmethod = digestmethod
except Exception as e:
raise e
@property
def audience(self) :
ur"""Audience for which assertion sent by IdP is applicable. This is typically entity name or url that represents ServiceProvider.<br/>Maximum length = 256.
"""
try :
return self._audience
except Exception as e:
raise e
@audience.setter
def audience(self, audience) :
ur"""Audience for which assertion sent by IdP is applicable. This is typically entity name or url that represents ServiceProvider.<br/>Maximum length = 256
"""
try :
self._audience = audience
except Exception as e:
raise e
@property
def nameidformat(self) :
ur"""Format of Name Identifier sent in Assertion.<br/>Default value: transient<br/>Possible values = Unspecified, emailAddress, X509SubjectName, WindowsDomainQualifiedName, kerberos, entity, persistent, transient.
"""
try :
return self._nameidformat
except Exception as e:
raise e
@nameidformat.setter
def nameidformat(self, nameidformat) :
ur"""Format of Name Identifier sent in Assertion.<br/>Default value: transient<br/>Possible values = Unspecified, emailAddress, X509SubjectName, WindowsDomainQualifiedName, kerberos, entity, persistent, transient
"""
try :
self._nameidformat = nameidformat
except Exception as e:
raise e
@property
def nameidexpr(self) :
ur"""Expression that will be evaluated to obtain NameIdentifier to be sent in assertion.<br/>Maximum length = 128.
"""
try :
return self._nameidexpr
except Exception as e:
raise e
@nameidexpr.setter
def nameidexpr(self, nameidexpr) :
ur"""Expression that will be evaluated to obtain NameIdentifier to be sent in assertion.<br/>Maximum length = 128
"""
try :
self._nameidexpr = nameidexpr
except Exception as e:
raise e
@property
def attribute1(self) :
ur"""Name of attribute1 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute1
except Exception as e:
raise e
@attribute1.setter
def attribute1(self, attribute1) :
ur"""Name of attribute1 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute1 = attribute1
except Exception as e:
raise e
@property
def attribute1expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute1expr
except Exception as e:
raise e
@attribute1expr.setter
def attribute1expr(self, attribute1expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute1expr = attribute1expr
except Exception as e:
raise e
@property
def attribute1friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute1friendlyname
except Exception as e:
raise e
@attribute1friendlyname.setter
def attribute1friendlyname(self, attribute1friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute1friendlyname = attribute1friendlyname
except Exception as e:
raise e
@property
def attribute1format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute1format
except Exception as e:
raise e
@attribute1format.setter
def attribute1format(self, attribute1format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute1format = attribute1format
except Exception as e:
raise e
@property
def attribute2(self) :
ur"""Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute2
except Exception as e:
raise e
@attribute2.setter
def attribute2(self, attribute2) :
ur"""Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute2 = attribute2
except Exception as e:
raise e
@property
def attribute2expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute2expr
except Exception as e:
raise e
@attribute2expr.setter
def attribute2expr(self, attribute2expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute2expr = attribute2expr
except Exception as e:
raise e
@property
def attribute2friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute2friendlyname
except Exception as e:
raise e
@attribute2friendlyname.setter
def attribute2friendlyname(self, attribute2friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute2friendlyname = attribute2friendlyname
except Exception as e:
raise e
@property
def attribute2format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute2format
except Exception as e:
raise e
@attribute2format.setter
def attribute2format(self, attribute2format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute2format = attribute2format
except Exception as e:
raise e
@property
def attribute3(self) :
ur"""Name of attribute3 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute3
except Exception as e:
raise e
@attribute3.setter
def attribute3(self, attribute3) :
ur"""Name of attribute3 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute3 = attribute3
except Exception as e:
raise e
@property
def attribute3expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute3expr
except Exception as e:
raise e
@attribute3expr.setter
def attribute3expr(self, attribute3expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute3expr = attribute3expr
except Exception as e:
raise e
@property
def attribute3friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute3friendlyname
except Exception as e:
raise e
@attribute3friendlyname.setter
def attribute3friendlyname(self, attribute3friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute3friendlyname = attribute3friendlyname
except Exception as e:
raise e
@property
def attribute3format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute3format
except Exception as e:
raise e
@attribute3format.setter
def attribute3format(self, attribute3format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute3format = attribute3format
except Exception as e:
raise e
@property
def attribute4(self) :
ur"""Name of attribute4 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute4
except Exception as e:
raise e
@attribute4.setter
def attribute4(self, attribute4) :
ur"""Name of attribute4 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute4 = attribute4
except Exception as e:
raise e
@property
def attribute4expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute4expr
except Exception as e:
raise e
@attribute4expr.setter
def attribute4expr(self, attribute4expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute4expr = attribute4expr
except Exception as e:
raise e
@property
def attribute4friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute4friendlyname
except Exception as e:
raise e
@attribute4friendlyname.setter
def attribute4friendlyname(self, attribute4friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute4friendlyname = attribute4friendlyname
except Exception as e:
raise e
@property
def attribute4format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute4format
except Exception as e:
raise e
@attribute4format.setter
def attribute4format(self, attribute4format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute4format = attribute4format
except Exception as e:
raise e
@property
def attribute5(self) :
ur"""Name of attribute5 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute5
except Exception as e:
raise e
@attribute5.setter
def attribute5(self, attribute5) :
ur"""Name of attribute5 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute5 = attribute5
except Exception as e:
raise e
@property
def attribute5expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute5expr
except Exception as e:
raise e
@attribute5expr.setter
def attribute5expr(self, attribute5expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute5expr = attribute5expr
except Exception as e:
raise e
@property
def attribute5friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute5friendlyname
except Exception as e:
raise e
@attribute5friendlyname.setter
def attribute5friendlyname(self, attribute5friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute5friendlyname = attribute5friendlyname
except Exception as e:
raise e
@property
def attribute5format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute5format
except Exception as e:
raise e
@attribute5format.setter
def attribute5format(self, attribute5format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute5format = attribute5format
except Exception as e:
raise e
@property
def attribute6(self) :
ur"""Name of attribute6 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute6
except Exception as e:
raise e
@attribute6.setter
def attribute6(self, attribute6) :
ur"""Name of attribute6 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute6 = attribute6
except Exception as e:
raise e
@property
def attribute6expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute6expr
except Exception as e:
raise e
@attribute6expr.setter
def attribute6expr(self, attribute6expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute6expr = attribute6expr
except Exception as e:
raise e
@property
def attribute6friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute6friendlyname
except Exception as e:
raise e
@attribute6friendlyname.setter
def attribute6friendlyname(self, attribute6friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute6friendlyname = attribute6friendlyname
except Exception as e:
raise e
@property
def attribute6format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute6format
except Exception as e:
raise e
@attribute6format.setter
def attribute6format(self, attribute6format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute6format = attribute6format
except Exception as e:
raise e
@property
def attribute7(self) :
ur"""Name of attribute7 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute7
except Exception as e:
raise e
@attribute7.setter
def attribute7(self, attribute7) :
ur"""Name of attribute7 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute7 = attribute7
except Exception as e:
raise e
@property
def attribute7expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute7expr
except Exception as e:
raise e
@attribute7expr.setter
def attribute7expr(self, attribute7expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute7expr = attribute7expr
except Exception as e:
raise e
@property
def attribute7friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute7friendlyname
except Exception as e:
raise e
@attribute7friendlyname.setter
def attribute7friendlyname(self, attribute7friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute7friendlyname = attribute7friendlyname
except Exception as e:
raise e
@property
def attribute7format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute7format
except Exception as e:
raise e
@attribute7format.setter
def attribute7format(self, attribute7format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute7format = attribute7format
except Exception as e:
raise e
@property
def attribute8(self) :
ur"""Name of attribute8 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute8
except Exception as e:
raise e
@attribute8.setter
def attribute8(self, attribute8) :
ur"""Name of attribute8 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute8 = attribute8
except Exception as e:
raise e
@property
def attribute8expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute8expr
except Exception as e:
raise e
@attribute8expr.setter
def attribute8expr(self, attribute8expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute8expr = attribute8expr
except Exception as e:
raise e
@property
def attribute8friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute8friendlyname
except Exception as e:
raise e
@attribute8friendlyname.setter
def attribute8friendlyname(self, attribute8friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute8friendlyname = attribute8friendlyname
except Exception as e:
raise e
@property
def attribute8format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute8format
except Exception as e:
raise e
@attribute8format.setter
def attribute8format(self, attribute8format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute8format = attribute8format
except Exception as e:
raise e
@property
def attribute9(self) :
ur"""Name of attribute9 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute9
except Exception as e:
raise e
@attribute9.setter
def attribute9(self, attribute9) :
ur"""Name of attribute9 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute9 = attribute9
except Exception as e:
raise e
@property
def attribute9expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute9expr
except Exception as e:
raise e
@attribute9expr.setter
def attribute9expr(self, attribute9expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute9expr = attribute9expr
except Exception as e:
raise e
@property
def attribute9friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute9friendlyname
except Exception as e:
raise e
@attribute9friendlyname.setter
def attribute9friendlyname(self, attribute9friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute9friendlyname = attribute9friendlyname
except Exception as e:
raise e
@property
def attribute9format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute9format
except Exception as e:
raise e
@attribute9format.setter
def attribute9format(self, attribute9format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute9format = attribute9format
except Exception as e:
raise e
@property
def attribute10(self) :
ur"""Name of attribute10 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute10
except Exception as e:
raise e
@attribute10.setter
def attribute10(self, attribute10) :
ur"""Name of attribute10 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute10 = attribute10
except Exception as e:
raise e
@property
def attribute10expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute10expr
except Exception as e:
raise e
@attribute10expr.setter
def attribute10expr(self, attribute10expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute10expr = attribute10expr
except Exception as e:
raise e
@property
def attribute10friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute10friendlyname
except Exception as e:
raise e
@attribute10friendlyname.setter
def attribute10friendlyname(self, attribute10friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute10friendlyname = attribute10friendlyname
except Exception as e:
raise e
@property
def attribute10format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute10format
except Exception as e:
raise e
@attribute10format.setter
def attribute10format(self, attribute10format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute10format = attribute10format
except Exception as e:
raise e
@property
def attribute11(self) :
ur"""Name of attribute11 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute11
except Exception as e:
raise e
@attribute11.setter
def attribute11(self, attribute11) :
ur"""Name of attribute11 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute11 = attribute11
except Exception as e:
raise e
@property
def attribute11expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute11expr
except Exception as e:
raise e
@attribute11expr.setter
def attribute11expr(self, attribute11expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute11expr = attribute11expr
except Exception as e:
raise e
@property
def attribute11friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute11friendlyname
except Exception as e:
raise e
@attribute11friendlyname.setter
def attribute11friendlyname(self, attribute11friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute11friendlyname = attribute11friendlyname
except Exception as e:
raise e
@property
def attribute11format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute11format
except Exception as e:
raise e
@attribute11format.setter
def attribute11format(self, attribute11format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute11format = attribute11format
except Exception as e:
raise e
@property
def attribute12(self) :
ur"""Name of attribute12 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute12
except Exception as e:
raise e
@attribute12.setter
def attribute12(self, attribute12) :
ur"""Name of attribute12 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute12 = attribute12
except Exception as e:
raise e
@property
def attribute12expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute12expr
except Exception as e:
raise e
@attribute12expr.setter
def attribute12expr(self, attribute12expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute12expr = attribute12expr
except Exception as e:
raise e
@property
def attribute12friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute12friendlyname
except Exception as e:
raise e
@attribute12friendlyname.setter
def attribute12friendlyname(self, attribute12friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute12friendlyname = attribute12friendlyname
except Exception as e:
raise e
@property
def attribute12format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute12format
except Exception as e:
raise e
@attribute12format.setter
def attribute12format(self, attribute12format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute12format = attribute12format
except Exception as e:
raise e
@property
def attribute13(self) :
ur"""Name of attribute13 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute13
except Exception as e:
raise e
@attribute13.setter
def attribute13(self, attribute13) :
ur"""Name of attribute13 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute13 = attribute13
except Exception as e:
raise e
@property
def attribute13expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute13expr
except Exception as e:
raise e
@attribute13expr.setter
def attribute13expr(self, attribute13expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute13expr = attribute13expr
except Exception as e:
raise e
@property
def attribute13friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute13friendlyname
except Exception as e:
raise e
@attribute13friendlyname.setter
def attribute13friendlyname(self, attribute13friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute13friendlyname = attribute13friendlyname
except Exception as e:
raise e
@property
def attribute13format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute13format
except Exception as e:
raise e
@attribute13format.setter
def attribute13format(self, attribute13format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute13format = attribute13format
except Exception as e:
raise e
@property
def attribute14(self) :
ur"""Name of attribute14 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute14
except Exception as e:
raise e
@attribute14.setter
def attribute14(self, attribute14) :
ur"""Name of attribute14 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute14 = attribute14
except Exception as e:
raise e
@property
def attribute14expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute14expr
except Exception as e:
raise e
@attribute14expr.setter
def attribute14expr(self, attribute14expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute14expr = attribute14expr
except Exception as e:
raise e
@property
def attribute14friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute14friendlyname
except Exception as e:
raise e
@attribute14friendlyname.setter
def attribute14friendlyname(self, attribute14friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute14friendlyname = attribute14friendlyname
except Exception as e:
raise e
@property
def attribute14format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute14format
except Exception as e:
raise e
@attribute14format.setter
def attribute14format(self, attribute14format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute14format = attribute14format
except Exception as e:
raise e
@property
def attribute15(self) :
ur"""Name of attribute15 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute15
except Exception as e:
raise e
@attribute15.setter
def attribute15(self, attribute15) :
ur"""Name of attribute15 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute15 = attribute15
except Exception as e:
raise e
@property
def attribute15expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute15expr
except Exception as e:
raise e
@attribute15expr.setter
def attribute15expr(self, attribute15expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute15expr = attribute15expr
except Exception as e:
raise e
@property
def attribute15friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute15friendlyname
except Exception as e:
raise e
@attribute15friendlyname.setter
def attribute15friendlyname(self, attribute15friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute15friendlyname = attribute15friendlyname
except Exception as e:
raise e
@property
def attribute15format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute15format
except Exception as e:
raise e
@attribute15format.setter
def attribute15format(self, attribute15format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute15format = attribute15format
except Exception as e:
raise e
@property
def attribute16(self) :
ur"""Name of attribute16 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute16
except Exception as e:
raise e
@attribute16.setter
def attribute16(self, attribute16) :
ur"""Name of attribute16 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute16 = attribute16
except Exception as e:
raise e
@property
def attribute16expr(self) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128.
"""
try :
return self._attribute16expr
except Exception as e:
raise e
@attribute16expr.setter
def attribute16expr(self, attribute16expr) :
ur"""Expression that will be evaluated to obtain attribute1's value to be sent in Assertion.<br/>Maximum length = 128
"""
try :
self._attribute16expr = attribute16expr
except Exception as e:
raise e
@property
def attribute16friendlyname(self) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64.
"""
try :
return self._attribute16friendlyname
except Exception as e:
raise e
@attribute16friendlyname.setter
def attribute16friendlyname(self, attribute16friendlyname) :
ur"""User-Friendly Name of attribute2 that needs to be sent in SAML Assertion.<br/>Maximum length = 64
"""
try :
self._attribute16friendlyname = attribute16friendlyname
except Exception as e:
raise e
@property
def attribute16format(self) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic.
"""
try :
return self._attribute16format
except Exception as e:
raise e
@attribute16format.setter
def attribute16format(self, attribute16format) :
ur"""Format of Attribute1 to be sent in Assertion.<br/>Default value: SAML_ATTR_UNSPECIFIED<br/>Possible values = URI, Basic
"""
try :
self._attribute16format = attribute16format
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(tmsamlssoprofile_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.tmsamlssoprofile
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add tmsamlssoprofile.
"""
try :
if type(resource) is not list :
addresource = tmsamlssoprofile()
addresource.name = resource.name
addresource.samlsigningcertname = resource.samlsigningcertname
addresource.assertionconsumerserviceurl = resource.assertionconsumerserviceurl
addresource.relaystaterule = resource.relaystaterule
addresource.sendpassword = resource.sendpassword
addresource.samlissuername = resource.samlissuername
addresource.signaturealg = resource.signaturealg
addresource.digestmethod = resource.digestmethod
addresource.audience = resource.audience
addresource.nameidformat = resource.nameidformat
addresource.nameidexpr = resource.nameidexpr
addresource.attribute1 = resource.attribute1
addresource.attribute1expr = resource.attribute1expr
addresource.attribute1friendlyname = resource.attribute1friendlyname
addresource.attribute1format = resource.attribute1format
addresource.attribute2 = resource.attribute2
addresource.attribute2expr = resource.attribute2expr
addresource.attribute2friendlyname = resource.attribute2friendlyname
addresource.attribute2format = resource.attribute2format
addresource.attribute3 = resource.attribute3
addresource.attribute3expr = resource.attribute3expr
addresource.attribute3friendlyname = resource.attribute3friendlyname
addresource.attribute3format = resource.attribute3format
addresource.attribute4 = resource.attribute4
addresource.attribute4expr = resource.attribute4expr
addresource.attribute4friendlyname = resource.attribute4friendlyname
addresource.attribute4format = resource.attribute4format
addresource.attribute5 = resource.attribute5
addresource.attribute5expr = resource.attribute5expr
addresource.attribute5friendlyname = resource.attribute5friendlyname
addresource.attribute5format = resource.attribute5format
addresource.attribute6 = resource.attribute6
addresource.attribute6expr = resource.attribute6expr
addresource.attribute6friendlyname = resource.attribute6friendlyname
addresource.attribute6format = resource.attribute6format
addresource.attribute7 = resource.attribute7
addresource.attribute7expr = resource.attribute7expr
addresource.attribute7friendlyname = resource.attribute7friendlyname
addresource.attribute7format = resource.attribute7format
addresource.attribute8 = resource.attribute8
addresource.attribute8expr = resource.attribute8expr
addresource.attribute8friendlyname = resource.attribute8friendlyname
addresource.attribute8format = resource.attribute8format
addresource.attribute9 = resource.attribute9
addresource.attribute9expr = resource.attribute9expr
addresource.attribute9friendlyname = resource.attribute9friendlyname
addresource.attribute9format = resource.attribute9format
addresource.attribute10 = resource.attribute10
addresource.attribute10expr = resource.attribute10expr
addresource.attribute10friendlyname = resource.attribute10friendlyname
addresource.attribute10format = resource.attribute10format
addresource.attribute11 = resource.attribute11
addresource.attribute11expr = resource.attribute11expr
addresource.attribute11friendlyname = resource.attribute11friendlyname
addresource.attribute11format = resource.attribute11format
addresource.attribute12 = resource.attribute12
addresource.attribute12expr = resource.attribute12expr
addresource.attribute12friendlyname = resource.attribute12friendlyname
addresource.attribute12format = resource.attribute12format
addresource.attribute13 = resource.attribute13
addresource.attribute13expr = resource.attribute13expr
addresource.attribute13friendlyname = resource.attribute13friendlyname
addresource.attribute13format = resource.attribute13format
addresource.attribute14 = resource.attribute14
addresource.attribute14expr = resource.attribute14expr
addresource.attribute14friendlyname = resource.attribute14friendlyname
addresource.attribute14format = resource.attribute14format
addresource.attribute15 = resource.attribute15
addresource.attribute15expr = resource.attribute15expr
addresource.attribute15friendlyname = resource.attribute15friendlyname
addresource.attribute15format = resource.attribute15format
addresource.attribute16 = resource.attribute16
addresource.attribute16expr = resource.attribute16expr
addresource.attribute16friendlyname = resource.attribute16friendlyname
addresource.attribute16format = resource.attribute16format
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ tmsamlssoprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].samlsigningcertname = resource[i].samlsigningcertname
addresources[i].assertionconsumerserviceurl = resource[i].assertionconsumerserviceurl
addresources[i].relaystaterule = resource[i].relaystaterule
addresources[i].sendpassword = resource[i].sendpassword
addresources[i].samlissuername = resource[i].samlissuername
addresources[i].signaturealg = resource[i].signaturealg
addresources[i].digestmethod = resource[i].digestmethod
addresources[i].audience = resource[i].audience
addresources[i].nameidformat = resource[i].nameidformat
addresources[i].nameidexpr = resource[i].nameidexpr
addresources[i].attribute1 = resource[i].attribute1
addresources[i].attribute1expr = resource[i].attribute1expr
addresources[i].attribute1friendlyname = resource[i].attribute1friendlyname
addresources[i].attribute1format = resource[i].attribute1format
addresources[i].attribute2 = resource[i].attribute2
addresources[i].attribute2expr = resource[i].attribute2expr
addresources[i].attribute2friendlyname = resource[i].attribute2friendlyname
addresources[i].attribute2format = resource[i].attribute2format
addresources[i].attribute3 = resource[i].attribute3
addresources[i].attribute3expr = resource[i].attribute3expr
addresources[i].attribute3friendlyname = resource[i].attribute3friendlyname
addresources[i].attribute3format = resource[i].attribute3format
addresources[i].attribute4 = resource[i].attribute4
addresources[i].attribute4expr = resource[i].attribute4expr
addresources[i].attribute4friendlyname = resource[i].attribute4friendlyname
addresources[i].attribute4format = resource[i].attribute4format
addresources[i].attribute5 = resource[i].attribute5
addresources[i].attribute5expr = resource[i].attribute5expr
addresources[i].attribute5friendlyname = resource[i].attribute5friendlyname
addresources[i].attribute5format = resource[i].attribute5format
addresources[i].attribute6 = resource[i].attribute6
addresources[i].attribute6expr = resource[i].attribute6expr
addresources[i].attribute6friendlyname = resource[i].attribute6friendlyname
addresources[i].attribute6format = resource[i].attribute6format
addresources[i].attribute7 = resource[i].attribute7
addresources[i].attribute7expr = resource[i].attribute7expr
addresources[i].attribute7friendlyname = resource[i].attribute7friendlyname
addresources[i].attribute7format = resource[i].attribute7format
addresources[i].attribute8 = resource[i].attribute8
addresources[i].attribute8expr = resource[i].attribute8expr
addresources[i].attribute8friendlyname = resource[i].attribute8friendlyname
addresources[i].attribute8format = resource[i].attribute8format
addresources[i].attribute9 = resource[i].attribute9
addresources[i].attribute9expr = resource[i].attribute9expr
addresources[i].attribute9friendlyname = resource[i].attribute9friendlyname
addresources[i].attribute9format = resource[i].attribute9format
addresources[i].attribute10 = resource[i].attribute10
addresources[i].attribute10expr = resource[i].attribute10expr
addresources[i].attribute10friendlyname = resource[i].attribute10friendlyname
addresources[i].attribute10format = resource[i].attribute10format
addresources[i].attribute11 = resource[i].attribute11
addresources[i].attribute11expr = resource[i].attribute11expr
addresources[i].attribute11friendlyname = resource[i].attribute11friendlyname
addresources[i].attribute11format = resource[i].attribute11format
addresources[i].attribute12 = resource[i].attribute12
addresources[i].attribute12expr = resource[i].attribute12expr
addresources[i].attribute12friendlyname = resource[i].attribute12friendlyname
addresources[i].attribute12format = resource[i].attribute12format
addresources[i].attribute13 = resource[i].attribute13
addresources[i].attribute13expr = resource[i].attribute13expr
addresources[i].attribute13friendlyname = resource[i].attribute13friendlyname
addresources[i].attribute13format = resource[i].attribute13format
addresources[i].attribute14 = resource[i].attribute14
addresources[i].attribute14expr = resource[i].attribute14expr
addresources[i].attribute14friendlyname = resource[i].attribute14friendlyname
addresources[i].attribute14format = resource[i].attribute14format
addresources[i].attribute15 = resource[i].attribute15
addresources[i].attribute15expr = resource[i].attribute15expr
addresources[i].attribute15friendlyname = resource[i].attribute15friendlyname
addresources[i].attribute15format = resource[i].attribute15format
addresources[i].attribute16 = resource[i].attribute16
addresources[i].attribute16expr = resource[i].attribute16expr
addresources[i].attribute16friendlyname = resource[i].attribute16friendlyname
addresources[i].attribute16format = resource[i].attribute16format
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete tmsamlssoprofile.
"""
try :
if type(resource) is not list :
deleteresource = tmsamlssoprofile()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ tmsamlssoprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ tmsamlssoprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update tmsamlssoprofile.
"""
try :
if type(resource) is not list :
updateresource = tmsamlssoprofile()
updateresource.name = resource.name
updateresource.samlsigningcertname = resource.samlsigningcertname
updateresource.assertionconsumerserviceurl = resource.assertionconsumerserviceurl
updateresource.sendpassword = resource.sendpassword
updateresource.samlissuername = resource.samlissuername
updateresource.relaystaterule = resource.relaystaterule
updateresource.signaturealg = resource.signaturealg
updateresource.digestmethod = resource.digestmethod
updateresource.audience = resource.audience
updateresource.nameidformat = resource.nameidformat
updateresource.nameidexpr = resource.nameidexpr
updateresource.attribute1 = resource.attribute1
updateresource.attribute1expr = resource.attribute1expr
updateresource.attribute1friendlyname = resource.attribute1friendlyname
updateresource.attribute1format = resource.attribute1format
updateresource.attribute2 = resource.attribute2
updateresource.attribute2expr = resource.attribute2expr
updateresource.attribute2friendlyname = resource.attribute2friendlyname
updateresource.attribute2format = resource.attribute2format
updateresource.attribute3 = resource.attribute3
updateresource.attribute3expr = resource.attribute3expr
updateresource.attribute3friendlyname = resource.attribute3friendlyname
updateresource.attribute3format = resource.attribute3format
updateresource.attribute4 = resource.attribute4
updateresource.attribute4expr = resource.attribute4expr
updateresource.attribute4friendlyname = resource.attribute4friendlyname
updateresource.attribute4format = resource.attribute4format
updateresource.attribute5 = resource.attribute5
updateresource.attribute5expr = resource.attribute5expr
updateresource.attribute5friendlyname = resource.attribute5friendlyname
updateresource.attribute5format = resource.attribute5format
updateresource.attribute6 = resource.attribute6
updateresource.attribute6expr = resource.attribute6expr
updateresource.attribute6friendlyname = resource.attribute6friendlyname
updateresource.attribute6format = resource.attribute6format
updateresource.attribute7 = resource.attribute7
updateresource.attribute7expr = resource.attribute7expr
updateresource.attribute7friendlyname = resource.attribute7friendlyname
updateresource.attribute7format = resource.attribute7format
updateresource.attribute8 = resource.attribute8
updateresource.attribute8expr = resource.attribute8expr
updateresource.attribute8friendlyname = resource.attribute8friendlyname
updateresource.attribute8format = resource.attribute8format
updateresource.attribute9 = resource.attribute9
updateresource.attribute9expr = resource.attribute9expr
updateresource.attribute9friendlyname = resource.attribute9friendlyname
updateresource.attribute9format = resource.attribute9format
updateresource.attribute10 = resource.attribute10
updateresource.attribute10expr = resource.attribute10expr
updateresource.attribute10friendlyname = resource.attribute10friendlyname
updateresource.attribute10format = resource.attribute10format
updateresource.attribute11 = resource.attribute11
updateresource.attribute11expr = resource.attribute11expr
updateresource.attribute11friendlyname = resource.attribute11friendlyname
updateresource.attribute11format = resource.attribute11format
updateresource.attribute12 = resource.attribute12
updateresource.attribute12expr = resource.attribute12expr
updateresource.attribute12friendlyname = resource.attribute12friendlyname
updateresource.attribute12format = resource.attribute12format
updateresource.attribute13 = resource.attribute13
updateresource.attribute13expr = resource.attribute13expr
updateresource.attribute13friendlyname = resource.attribute13friendlyname
updateresource.attribute13format = resource.attribute13format
updateresource.attribute14 = resource.attribute14
updateresource.attribute14expr = resource.attribute14expr
updateresource.attribute14friendlyname = resource.attribute14friendlyname
updateresource.attribute14format = resource.attribute14format
updateresource.attribute15 = resource.attribute15
updateresource.attribute15expr = resource.attribute15expr
updateresource.attribute15friendlyname = resource.attribute15friendlyname
updateresource.attribute15format = resource.attribute15format
updateresource.attribute16 = resource.attribute16
updateresource.attribute16expr = resource.attribute16expr
updateresource.attribute16friendlyname = resource.attribute16friendlyname
updateresource.attribute16format = resource.attribute16format
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ tmsamlssoprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].samlsigningcertname = resource[i].samlsigningcertname
updateresources[i].assertionconsumerserviceurl = resource[i].assertionconsumerserviceurl
updateresources[i].sendpassword = resource[i].sendpassword
updateresources[i].samlissuername = resource[i].samlissuername
updateresources[i].relaystaterule = resource[i].relaystaterule
updateresources[i].signaturealg = resource[i].signaturealg
updateresources[i].digestmethod = resource[i].digestmethod
updateresources[i].audience = resource[i].audience
updateresources[i].nameidformat = resource[i].nameidformat
updateresources[i].nameidexpr = resource[i].nameidexpr
updateresources[i].attribute1 = resource[i].attribute1
updateresources[i].attribute1expr = resource[i].attribute1expr
updateresources[i].attribute1friendlyname = resource[i].attribute1friendlyname
updateresources[i].attribute1format = resource[i].attribute1format
updateresources[i].attribute2 = resource[i].attribute2
updateresources[i].attribute2expr = resource[i].attribute2expr
updateresources[i].attribute2friendlyname = resource[i].attribute2friendlyname
updateresources[i].attribute2format = resource[i].attribute2format
updateresources[i].attribute3 = resource[i].attribute3
updateresources[i].attribute3expr = resource[i].attribute3expr
updateresources[i].attribute3friendlyname = resource[i].attribute3friendlyname
updateresources[i].attribute3format = resource[i].attribute3format
updateresources[i].attribute4 = resource[i].attribute4
updateresources[i].attribute4expr = resource[i].attribute4expr
updateresources[i].attribute4friendlyname = resource[i].attribute4friendlyname
updateresources[i].attribute4format = resource[i].attribute4format
updateresources[i].attribute5 = resource[i].attribute5
updateresources[i].attribute5expr = resource[i].attribute5expr
updateresources[i].attribute5friendlyname = resource[i].attribute5friendlyname
updateresources[i].attribute5format = resource[i].attribute5format
updateresources[i].attribute6 = resource[i].attribute6
updateresources[i].attribute6expr = resource[i].attribute6expr
updateresources[i].attribute6friendlyname = resource[i].attribute6friendlyname
updateresources[i].attribute6format = resource[i].attribute6format
updateresources[i].attribute7 = resource[i].attribute7
updateresources[i].attribute7expr = resource[i].attribute7expr
updateresources[i].attribute7friendlyname = resource[i].attribute7friendlyname
updateresources[i].attribute7format = resource[i].attribute7format
updateresources[i].attribute8 = resource[i].attribute8
updateresources[i].attribute8expr = resource[i].attribute8expr
updateresources[i].attribute8friendlyname = resource[i].attribute8friendlyname
updateresources[i].attribute8format = resource[i].attribute8format
updateresources[i].attribute9 = resource[i].attribute9
updateresources[i].attribute9expr = resource[i].attribute9expr
updateresources[i].attribute9friendlyname = resource[i].attribute9friendlyname
updateresources[i].attribute9format = resource[i].attribute9format
updateresources[i].attribute10 = resource[i].attribute10
updateresources[i].attribute10expr = resource[i].attribute10expr
updateresources[i].attribute10friendlyname = resource[i].attribute10friendlyname
updateresources[i].attribute10format = resource[i].attribute10format
updateresources[i].attribute11 = resource[i].attribute11
updateresources[i].attribute11expr = resource[i].attribute11expr
updateresources[i].attribute11friendlyname = resource[i].attribute11friendlyname
updateresources[i].attribute11format = resource[i].attribute11format
updateresources[i].attribute12 = resource[i].attribute12
updateresources[i].attribute12expr = resource[i].attribute12expr
updateresources[i].attribute12friendlyname = resource[i].attribute12friendlyname
updateresources[i].attribute12format = resource[i].attribute12format
updateresources[i].attribute13 = resource[i].attribute13
updateresources[i].attribute13expr = resource[i].attribute13expr
updateresources[i].attribute13friendlyname = resource[i].attribute13friendlyname
updateresources[i].attribute13format = resource[i].attribute13format
updateresources[i].attribute14 = resource[i].attribute14
updateresources[i].attribute14expr = resource[i].attribute14expr
updateresources[i].attribute14friendlyname = resource[i].attribute14friendlyname
updateresources[i].attribute14format = resource[i].attribute14format
updateresources[i].attribute15 = resource[i].attribute15
updateresources[i].attribute15expr = resource[i].attribute15expr
updateresources[i].attribute15friendlyname = resource[i].attribute15friendlyname
updateresources[i].attribute15format = resource[i].attribute15format
updateresources[i].attribute16 = resource[i].attribute16
updateresources[i].attribute16expr = resource[i].attribute16expr
updateresources[i].attribute16friendlyname = resource[i].attribute16friendlyname
updateresources[i].attribute16format = resource[i].attribute16format
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of tmsamlssoprofile resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = tmsamlssoprofile()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ tmsamlssoprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ tmsamlssoprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the tmsamlssoprofile resources that are configured on netscaler.
"""
try :
if not name :
obj = tmsamlssoprofile()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = tmsamlssoprofile()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [tmsamlssoprofile() for _ in range(len(name))]
obj = [tmsamlssoprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = tmsamlssoprofile()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of tmsamlssoprofile resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = tmsamlssoprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the tmsamlssoprofile resources configured on NetScaler.
"""
try :
obj = tmsamlssoprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of tmsamlssoprofile resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = tmsamlssoprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Attribute3format:
URI = "URI"
Basic = "Basic"
class Sendpassword:
ON = "ON"
OFF = "OFF"
class Attribute6format:
URI = "URI"
Basic = "Basic"
class Attribute10format:
URI = "URI"
Basic = "Basic"
class Attribute9format:
URI = "URI"
Basic = "Basic"
class Nameidformat:
Unspecified = "Unspecified"
emailAddress = "emailAddress"
X509SubjectName = "X509SubjectName"
WindowsDomainQualifiedName = "WindowsDomainQualifiedName"
kerberos = "kerberos"
entity = "entity"
persistent = "persistent"
Transient = "transient"
class Signaturealg:
RSA_SHA1 = "RSA-SHA1"
RSA_SHA256 = "RSA-SHA256"
class Attribute1format:
URI = "URI"
Basic = "Basic"
class Attribute12format:
URI = "URI"
Basic = "Basic"
class Attribute8format:
URI = "URI"
Basic = "Basic"
class Attribute5format:
URI = "URI"
Basic = "Basic"
class Attribute7format:
URI = "URI"
Basic = "Basic"
class Attribute15format:
URI = "URI"
Basic = "Basic"
class Digestmethod:
SHA1 = "SHA1"
SHA256 = "SHA256"
class Attribute2format:
URI = "URI"
Basic = "Basic"
class Attribute4format:
URI = "URI"
Basic = "Basic"
class Attribute13format:
URI = "URI"
Basic = "Basic"
class Attribute14format:
URI = "URI"
Basic = "Basic"
class Attribute16format:
URI = "URI"
Basic = "Basic"
class Attribute11format:
URI = "URI"
Basic = "Basic"
class tmsamlssoprofile_response(base_response) :
def __init__(self, length=1) :
self.tmsamlssoprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.tmsamlssoprofile = [tmsamlssoprofile() for _ in range(length)]
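# A hedged usage sketch (not part of the generated SDK): exercising the CRUD
# classmethods above through an assumed nitro_service session. The NSIP,
# credentials, certificate name and URL below are illustrative placeholders.
if __name__ == '__main__':
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

	client = nitro_service("10.0.0.1", "http")  # placeholder NSIP
	client.login("nsroot", "nsroot")  # placeholder credentials

	profile = tmsamlssoprofile()
	profile.name = "example_saml_profile"
	profile.samlsigningcertname = "example_cert"  # assumed to exist on the appliance
	profile.assertionconsumerserviceurl = "https://sp.example.com/acs"
	tmsamlssoprofile.add(client, profile)

	# Fetch the profile back and count all configured profiles.
	fetched = tmsamlssoprofile.get(client, "example_saml_profile")
	total = tmsamlssoprofile.count(client)

	tmsamlssoprofile.delete(client, "example_saml_profile")
	client.logout()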
| apache-2.0 | -7,694,855,360,198,886,000 | 33.635432 | 310 | 0.737628 | false |
askeing/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/example/hsts_wsh.py | 486 | 1784 | # Copyright 2013, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
request.extra_headers.append(
('Strict-Transport-Security', 'max-age=86400'))
def web_socket_transfer_data(request):
request.ws_stream.send_message('Hello', binary=False)
# vi:sts=4 sw=4 et
| mpl-2.0 | -5,604,237,057,437,617,000 | 43.6 | 72 | 0.770179 | false |
siddharths067/HuHubaProject | lib/urllib3/util/__init__.py | 204 | 1044 | from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)
from .wait import (
wait_for_read,
wait_for_write
)
__all__ = (
'HAS_SNI',
'IS_PYOPENSSL',
'IS_SECURETRANSPORT',
'SSLContext',
'Retry',
'Timeout',
'Url',
'assert_fingerprint',
'current_time',
'is_connection_dropped',
'is_fp_closed',
'get_host',
'parse_url',
'make_headers',
'resolve_cert_reqs',
'resolve_ssl_version',
'split_first',
'ssl_wrap_socket',
'wait_for_read',
'wait_for_write'
)
| mit | 8,028,172,173,013,394,000 | 18.333333 | 68 | 0.627395 | false |
SRabbelier/Melange | thirdparty/google_appengine/google/appengine/ext/bulkload/transform.py | 3 | 15896 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bulkloader Transform Helper functions.
A collection of helper functions for bulkloading data, typically referenced
from a bulkloader.yaml file.
"""
import base64
import datetime
import os
import re
import tempfile
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.ext.bulkload import bulkloader_errors
CURRENT_PROPERTY = None
KEY_TYPE_NAME = 'name'
KEY_TYPE_ID = 'ID'
# Decorators
def none_if_empty(fn):
"""A decorator which returns None if its input is empty else fn(x).
Useful on import. Can be used in config files
(e.g. "transform.none_if_empty(int)" or as a decorator.
Args:
fn: Single argument transform function.
Returns:
Wrapped function.
"""
def wrapper(value):
if value == '' or value is None or value == []:
return None
return fn(value)
return wrapper
def empty_if_none(fn):
"""A wrapper for a value to return '' if it's None. Useful on export.
Can be used in config files (e.g. "transform.empty_if_none(unicode)" or
as a decorator.
Args:
fn: Single argument transform function.
Returns:
Wrapped function.
"""
def wrapper(value):
if value is None:
return ''
return fn(value)
return wrapper
# Key helpers.
def create_foreign_key(kind, key_is_id=False):
"""A method to make one-level Key objects.
These are typically used in ReferenceProperty in Python, where the reference
value is a key with kind (or model) name name.
This helper method does not support keys with parents. Use create_deep_key
instead to create keys with parents.
Args:
kind: The kind name of the reference as a string.
key_is_id: If true, convert the key into an integer to be used as an id.
If false, leave the key in the input format (typically a string).
Returns:
Single argument method which parses a value into a Key of kind entity_kind.
"""
def generate_foreign_key_lambda(value):
if key_is_id:
value = int(value)
return datastore.Key.from_path(kind, value)
return generate_foreign_key_lambda
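# A hedged illustration (not part of the original module) of how a configured
# create_foreign_key transform behaves; the kind names are placeholders.
#
#   make_author_key = create_foreign_key('Author')
#   make_author_key('alice')    # -> datastore.Key.from_path('Author', 'alice')
#
#   make_book_key = create_foreign_key('Book', key_is_id=True)
#   make_book_key('42')         # -> datastore.Key.from_path('Book', 42)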
def create_deep_key(*path_info):
"""A method to make multi-level Key objects.
Generates multi-level key from multiple fields in the input dictionary.
This is typically used for Keys for entities which have variable parent keys,
e.g. ones with owned relationships. It can used for both __key__ and
references.
Use create_foreign_key as a simpler way to create single level keys.
Args:
path_info: List of tuples, describing (kind, property, is_id=False).
kind: The kind name.
property: The external property in the current import dictionary, or
transform.CURRENT_PROPERTY for the value passed to the transform.
is_id: Converts value to int and treats as numeric ID if True, otherwise
the value is a string name. Default is False.
Example:
create_deep_key(('rootkind', 'rootcolumn'),
('childkind', 'childcolumn', True),
('leafkind', transform.CURRENT_PROPERTY))
Returns:
Transform method which parses the info from the current neutral dictionary
into a Key with parents as described by path_info.
"""
validated_path_info = []
for level_info in path_info:
if len(level_info) == 3:
key_is_id = level_info[2]
elif len(level_info) == 2:
key_is_id = False
else:
raise bulkloader_errors.InvalidConfiguration(
'Each list in create_deep_key must specify exactly 2 or 3 '
'parameters, (kind, property, is_id=False). You specified: %s' %
repr(path_info))
kind_name = level_info[0]
property_name = level_info[1]
validated_path_info.append((kind_name, property_name, key_is_id))
def create_deep_key_lambda(value, bulkload_state):
path = []
for kind_name, property_name, key_is_id in validated_path_info:
if property_name is CURRENT_PROPERTY:
name_or_id = value
else:
name_or_id = bulkload_state.current_dictionary[property_name]
if key_is_id:
name_or_id = int(name_or_id)
path += [kind_name, name_or_id]
return datastore.Key.from_path(*path)
return create_deep_key_lambda
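# A hedged illustration: with the docstring's example configuration above, an
# input dictionary {'rootcolumn': 'r1', 'childcolumn': '7'} and a current
# property value 'leaf' would produce
# datastore.Key.from_path('rootkind', 'r1', 'childkind', 7, 'leafkind', 'leaf').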
def _key_id_or_name_n(key, index):
"""Internal helper function for key id and name transforms.
Args:
key: A datastore key.
index: The depth in the key to return; 0 is root, -1 is leaf.
Returns:
The id or name of the nth deep sub key in key.
"""
if not key:
return None
path = key.to_path()
if not path:
return None
path_index = (index * 2) + 1
return path[path_index]
def key_id_or_name_as_string_n(index):
"""Pull out the nth (0-based) key id or name from a key which has parents.
If a key is present, return its id or name as a string.
Note that this loses the distinction between integer IDs and strings
which happen to look like integers. Use key_type to distinguish them.
This is a useful complement to create_deep_key.
Args:
index: The depth of the id or name to extract. Zero is the root key.
Negative one is the leaf key.
Returns:
Function extracting the name or ID of the key at depth index, as a unicode
string. Returns '' if key is empty (unsaved), otherwise raises IndexError
if the key is not as deep as described.
"""
def transform_function(key):
id_or_name = _key_id_or_name_n(key, index)
if not id_or_name:
return u''
return unicode(id_or_name)
return transform_function
# Commonly used helper which returns the value of the leaf key.
key_id_or_name_as_string = key_id_or_name_as_string_n(-1)
def key_type_n(index):
"""Pull out the nth (0-based) key type from a key which has parents.
This is most useful when paired with key_id_or_name_as_string_n.
This is a useful complement to create_deep_key.
Args:
index: The depth of the id or name to extract. Zero is the root key.
Negative one is the leaf key.
Returns:
Method returning the type ('ID' or 'name') of the key at depth index.
Returns '' if key is empty (unsaved), otherwise raises IndexError
if the key is not as deep as described.
"""
def transform_function(key):
id_or_name = _key_id_or_name_n(key, index)
if id_or_name is None:
return ''
if isinstance(id_or_name, basestring):
return KEY_TYPE_NAME
return KEY_TYPE_ID
return transform_function
# Commonly used helper which returns the type of the leaf key.
key_type = key_type_n(-1)
def key_kind_n(index):
"""Pull out the nth (0-based) key kind from a key which has parents.
This is a useful complement to create_deep_key.
Args:
index: The depth of the id or name to extract. Zero is the root key.
Negative one is the leaf key.
Returns:
Function returning the kind of the key at depth index, or raising
IndexError if the key is not as deep as described.
"""
@empty_if_none
def transform_function(key):
path = key.to_path()
path_index = (index * 2)
return unicode(path[path_index])
return transform_function
# Commonly used helper which returns the kind of the leaf key.
key_kind = key_kind_n(-1)
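# A hedged illustration of the three leaf-key helpers together; the kinds and
# values are placeholders.
#
#   key = datastore.Key.from_path('Parent', 1, 'Child', 'c')
#   key_kind(key)                  # -> u'Child'
#   key_type(key)                  # -> 'name'
#   key_id_or_name_as_string(key)  # -> u'c'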
# Blob and ByteString helpers.
@none_if_empty
def blobproperty_from_base64(value):
"""Return a datastore blob property containing the base64 decoded value."""
decoded_value = base64.b64decode(value)
return datastore_types.Blob(decoded_value)
@none_if_empty
def bytestring_from_base64(value):
"""Return a datastore bytestring property from a base64 encoded value."""
decoded_value = base64.b64decode(value)
return datastore_types.ByteString(decoded_value)
def blob_to_file(filename_hint_propertyname=None,
directory_hint=''):
"""Write the blob contents to a file, and replace them with the filename.
Args:
filename_hint_propertyname: If present, the filename will begin with
the contents of this value in the entity being exported.
directory_hint: If present, the files will be stored in this directory.
Returns:
A function which writes the input blob to a file.
"""
directory = []
def transform_function(value, bulkload_state):
if not directory:
parent_dir = os.path.dirname(bulkload_state.filename)
directory.append(os.path.join(parent_dir, directory_hint))
if directory[0] and not os.path.exists(directory[0]):
os.makedirs(directory[0])
filename_hint = 'blob_'
suffix = ''
filename = ''
if filename_hint_propertyname:
filename_hint = bulkload_state.current_entity[filename_hint_propertyname]
filename = os.path.join(directory[0], filename_hint)
if os.path.exists(filename):
filename = ''
(filename_hint, suffix) = os.path.splitext(filename_hint)
if not filename:
filename = tempfile.mktemp(suffix, filename_hint, directory[0])
f = open(filename, 'wb')
f.write(value)
f.close()
return filename
return transform_function
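# A hedged sketch (hypothetical property and directory names) of wiring
# blob_to_file into a bulkloader.yaml property map on export:
#
#   - property: picture
#     external_name: picture_file
#     export_transform: transform.blob_to_file('file_name', 'pictures')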
# Formatted string helpers: Extract, convert to boolean, date, or list.
def import_date_time(format, _strptime=None):
"""A wrapper around strptime. Also returns None if the input is empty.
Args:
format: Format string for strptime.
Returns:
Single argument method which parses a string into a datetime using format.
"""
if not _strptime:
_strptime = datetime.datetime.strptime
def import_date_time_lambda(value):
if not value:
return None
return _strptime(value, format)
return import_date_time_lambda
def export_date_time(format):
"""A wrapper around strftime. Also returns '' if the input is None.
Args:
format: Format string for strftime.
Returns:
    Single argument method which converts a datetime into a string using format.
"""
def export_date_time_lambda(value):
if not value:
return ''
return datetime.datetime.strftime(value, format)
return export_date_time_lambda
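# A hedged round-trip illustration of the two date helpers above:
#
#   to_dt = import_date_time('%Y-%m-%d %H:%M:%S')
#   to_str = export_date_time('%Y-%m-%d %H:%M:%S')
#   to_str(to_dt('2010-05-01 12:00:00'))  # -> '2010-05-01 12:00:00'
#   to_dt('')  # -> None, and to_str(None) -> ''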
def regexp_extract(pattern, method=re.match, group=1):
"""Return first group in the value matching the pattern using re.match.
Args:
pattern: A regular expression to match on with at least one group.
method: The method to use for matching; normally re.match or re.search.
group: The group to use for extracting a value.
Returns:
A single argument method which returns the group_arg group matched,
or None if no match was found or the input was empty.
"""
def regexp_extract_lambda(value):
if not value:
return None
matches = method(pattern, value)
if not matches:
return None
return matches.group(group)
return regexp_extract_lambda
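# A hedged illustration with a placeholder pattern:
#
#   get_year = regexp_extract(r'(\d{4})', method=re.search)
#   get_year('Published in 1997.')  # -> '1997'
#   get_year('no digits here')      # -> None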
def regexp_to_list(pattern):
"""Return function that returns a list of objects that match the regex.
  Useful on import. Uses the provided regex to extract from a string value a
  list of matching strings. Wrapped by none_if_empty, so it returns None if
  the input is empty, and it also returns None if the regex finds no matches.
Args:
pattern: A regular expression pattern to match against the input string.
Returns:
None if the input was none or no matches were found, otherwise a list of
strings matching the input expression.
"""
@none_if_empty
def regexp_to_list_lambda(value):
result = re.findall(pattern, value)
if result == []:
return None
return result
return regexp_to_list_lambda
def regexp_bool(regexp, flags=0):
"""Return a boolean if the expression matches with re.match.
Note that re.match anchors at the start but not end of the string.
Args:
regexp: String, regular expression.
flags: Optional flags to pass to re.match.
Returns:
Method which returns a Boolean if the expression matches.
"""
def transform_function(value):
return bool(re.match(regexp, value, flags))
return transform_function
def split_string(delimeter):
"""Split a string using the delimeter into a list.
This is just a wrapper for string.split.
Args:
delimeter: The delimiter to split the string on.
Returns:
Method which splits the string into a list along the delimeter.
"""
def split_string_lambda(value):
return value.split(delimeter)
return split_string_lambda
def join_list(delimeter):
"""Join a list into a string using the delimeter.
This is just a wrapper for string.join.
Args:
delimeter: The delimiter to use when joining the string.
Returns:
Method which joins the list into a string with the delimeter.
"""
def join_string_lambda(value):
return delimeter.join(value)
return join_string_lambda
def list_from_multiproperty(*external_names):
"""Create a list from multiple properties.
Args:
external_names: List of the properties to use.
Returns:
Transform function which returns a list of the properties in external_names.
"""
def list_from_multiproperty_lambda(unused_value, bulkload_state):
result = []
for external_name in external_names:
value = bulkload_state.current_dictionary.get(external_name)
if value:
result.append(value)
return result
return list_from_multiproperty_lambda
def property_from_list(index):
"""Return the Nth item from a list, or '' if the list is shorter.
Args:
index: Item in the list to return.
Returns:
Function returning the item from a list, or '' if the list is too short.
"""
@empty_if_none
def property_from_list_lambda(values):
if len(values) > index:
return values[index]
return ''
return property_from_list_lambda
# SimpleXML list Helpers
def list_from_child_node(xpath, suppress_blank=False):
"""Return a list property from child nodes of the current xml node.
  This applies only to the simplexml helper, as it assumes __node__, the current
ElementTree node corresponding to the import record.
Sample usage for structure:
<Visit>
<VisitActivities>
<Activity>A1</Activity>
<Activity>A2</Activity>
</VisitActivities>
</Visit>
property: activities
external_name: VisitActivities # Ignored on import, used on export.
  import_transform: list_from_child_node('VisitActivities/Activity')
export_transform: child_node_from_list('Activity')
Args:
xpath: XPath to run on the current node.
    suppress_blank: if True, nodes with no text will be skipped.
Returns:
Transform function which works as described in the args.
"""
def list_from_child_node_lambda(unused_value, bulkload_state):
result = []
for node in bulkload_state.current_dictionary['__node__'].findall(xpath):
if node.text:
result.append(node.text)
elif not suppress_blank:
result.append('')
return result
return list_from_child_node_lambda
def child_node_from_list(child_node_name):
"""Return a value suitable for generating an XML child node on export.
The return value is a list of tuples which the simplexml connector will
use to build a child node.
See also list_from_child_node
Args:
child_node_name: The name to use for each child node.
Returns:
Transform function which works as described in the args.
"""
def child_node_from_list_lambda(values):
return [(child_node_name, value) for value in values]
return child_node_from_list_lambda
| apache-2.0 | 5,308,785,058,024,714,000 | 26.080068 | 80 | 0.695835 | false |
anaran/olympia | apps/amo/management/commands/clean_redis.py | 9 | 3337 | import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
def keys():
ks = slave.keys()
log.info('There are %s keys to clean up.' % len(ks))
ks = iter(ks)
while 1:
buffer = []
for _ in xrange(CHUNK):
try:
buffer.append(ks.next())
except StopIteration:
yield buffer
return
yield buffer
tmp = tempfile.NamedTemporaryFile(delete=False)
for ks in keys():
tmp.write('\n'.join(ks))
tmp.close()
    # It's hard to get Python to clean up the memory from slave.keys(), so
    # we'll let the OS do it. Pass sys.executable both as the program to run
    # and as the first argument so that argv[0] is set properly.
os.execl(sys.executable, sys.executable, sys.argv[0],
sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
tmp = open(filename)
total = [1, 0]
p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
total[0] = int(p.communicate()[0].strip().split()[0])
def file_keys():
while 1:
buffer = []
for _ in xrange(CHUNK):
line = tmp.readline()
if line:
buffer.append(line.strip())
else:
yield buffer
return
yield buffer
num = 0
for ks in file_keys():
pipe = slave.pipeline()
for k in ks:
pipe.scard(k)
try:
drop = [k for k, size in zip(ks, pipe.execute())
if 0 < size < MIN or size > MAX]
except RedisError:
continue
num += len(ks)
percent = round(float(num) / total[0] * 100, 1) if total[0] else 0
total[1] += len(drop)
log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
pipe = master.pipeline()
for k in drop:
pipe.expire(k, EXPIRE)
try:
pipe.execute()
except RedisError:
continue
time.sleep(1) # Poor man's rate limiting.
if total[0]:
log.info('Dropped %s keys [%.1f%%].' %
(total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
help = "Clean up the redis used by cache machine."
def handle(self, *args, **kw):
try:
master = redisutils.connections['cache']
slave = redisutils.connections['cache_slave']
except Exception:
log.error('Could not connect to redis.', exc_info=True)
return
if args:
filename = args[0]
try:
cleanup(master, slave, filename)
finally:
os.unlink(filename)
else:
vacuum(master, slave)
| bsd-3-clause | -8,952,598,293,099,763,000 | 27.042017 | 78 | 0.540306 | false |
dataxu/ansible | lib/ansible/parsing/yaml/dumper.py | 90 | 2246 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText
from ansible.vars.hostvars import HostVars
class AnsibleDumper(yaml.SafeDumper):
'''
A simple stub class that allows us to add representers
for our overridden object types.
'''
pass
def represent_hostvars(self, data):
return self.represent_dict(dict(data))
# Note: only want to represent the encrypted data
def represent_vault_encrypted_unicode(self, data):
return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')
if PY3:
represent_unicode = yaml.representer.SafeRepresenter.represent_str
else:
represent_unicode = yaml.representer.SafeRepresenter.represent_unicode
AnsibleDumper.add_representer(
AnsibleUnicode,
represent_unicode,
)
AnsibleDumper.add_representer(
AnsibleUnsafeText,
represent_unicode,
)
AnsibleDumper.add_representer(
HostVars,
represent_hostvars,
)
AnsibleDumper.add_representer(
AnsibleSequence,
yaml.representer.SafeRepresenter.represent_list,
)
AnsibleDumper.add_representer(
AnsibleMapping,
yaml.representer.SafeRepresenter.represent_dict,
)
AnsibleDumper.add_representer(
AnsibleVaultEncryptedUnicode,
represent_vault_encrypted_unicode,
)
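# A hedged usage sketch (not part of this module): serializing with the
# customized dumper so Ansible's wrapper types dump like plain YAML values.
#
#   import yaml
#   from ansible.parsing.yaml.dumper import AnsibleDumper
#   yaml.dump({'greeting': AnsibleUnicode(u'hello')}, Dumper=AnsibleDumper,
#             default_flow_style=False)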
| gpl-3.0 | 4,392,328,333,244,897,000 | 27.43038 | 118 | 0.765806 | false |
turon/openthread | tools/harness-automation/cases/router_9_2_6.py | 1 | 1878 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_9_2_6(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '9 2 6'
golden_devices_required = 4
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -5,015,353,849,229,359,000 | 39.826087 | 77 | 0.761448 | false |
inspirehep/invenio | modules/bibindex/lib/bibindex_engine_tokenizer_unit_tests.py | 5 | 20948 | # -*- coding:utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""bibindex_engine_tokenizer_tests - unit tests for tokenizers
There should always be at least one test class for each class in b_e_t.
"""
from invenio.testutils import InvenioTestCase
from invenio.testutils import make_test_suite, run_test_suite
from invenio.bibindex_engine_utils import load_tokenizers
_TOKENIZERS = load_tokenizers()
class TestAuthorTokenizerScanning(InvenioTestCase):
"""Test BibIndex name tokenization"""
def setUp(self):
self.tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
self.scan = self.tokenizer.scan_string_for_phrases
def test_bifnt_scan_single(self):
"""BibIndexAuthorTokenizer - scanning single names like 'Dido'"""
teststr = "Dido"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Dido'], 'nonlastnames': [], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_simple_western_forward(self):
"""BibIndexAuthorTokenizer - scanning simple Western-style: first last"""
teststr = "Ringo Starr"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_simple_western_reverse(self):
"""BibIndexAuthorTokenizer - scanning simple Western-style: last, first"""
teststr = "Starr, Ringo"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_multiname_forward(self):
"""BibIndexAuthorTokenizer - scanning multiword: first middle last"""
teststr = "Michael Edward Peskin"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_multiname_dotcrammed(self):
"""BibIndexAuthorTokenizer - scanning multiword: f.m. last"""
teststr = "M.E. Peskin"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_multiname_dotcrammed_reversed(self):
"""BibIndexAuthorTokenizer - scanning multiword: last, f.m."""
teststr = "Peskin, M.E."
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_multiname_dashcrammed(self):
"""BibIndexAuthorTokenizer - scanning multiword: first-middle last"""
teststr = "Jean-Luc Picard"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_multiname_dashcrammed_reversed(self):
"""BibIndexAuthorTokenizer - scanning multiword: last, first-middle"""
teststr = "Picard, Jean-Luc"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_compound_lastname_dashes(self):
"""BibIndexAuthorTokenizer - scanning multiword: first middle last-last"""
teststr = "Cantina Octavia Jones-Smith"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_compound_lastname_dashes_reverse(self):
"""BibIndexAuthorTokenizer - scanning multiword: last-last, first middle"""
teststr = "Jones-Smith, Cantina Octavia"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_compound_lastname_reverse(self):
"""BibIndexAuthorTokenizer - scanning compound last: last last, first"""
teststr = "Alvarez Gaume, Joachim"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': [], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_titled(self):
"""BibIndexAuthorTokenizer - scanning title-bearing: last, first, title"""
teststr = "Epstein, Brian, The Fifth Beatle"
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle'], 'raw' : teststr}
self.assertEqual(output, anticipated)
def test_bifnt_scan_wildly_interesting(self):
"""BibIndexAuthorTokenizer - scanning last last last, first first, title, title"""
teststr = "Ibanez y Gracia, Maria Luisa, II., ed."
output = self.scan(teststr)
anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II.', 'ed.'], 'raw' : teststr}
self.assertEqual(output, anticipated)
class TestAuthorTokenizerTokens(InvenioTestCase):
"""Test BibIndex name variant token generation from scanned and tagged sets"""
def setUp(self):
self.tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
self.get_index_tokens = self.tokenizer.parse_scanned_for_phrases
def test_bifnt_tokenize_single(self):
"""BibIndexAuthorTokenizer - tokens for single-word name
Ronaldo
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Ronaldo'], 'nonlastnames': [], 'titles': [], 'raw' : 'Ronaldo'}
output = self.get_index_tokens(tagged_data)
anticipated = ['Ronaldo']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_simple_forward(self):
"""BibIndexAuthorTokenizer - tokens for first last
Ringo Starr
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : 'Ringo Starr'}
output = self.get_index_tokens(tagged_data)
anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_simple_reverse(self):
"""BibIndexAuthorTokenizer - tokens for last, first
Starr, Ringo
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : 'Starr, Ringo'}
output = self.get_index_tokens(tagged_data)
anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_twoname_forward(self):
"""BibIndexAuthorTokenizer - tokens for first middle last
Michael Edward Peskin
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': [], 'raw' : 'Michael Edward Peskin'}
output = self.get_index_tokens(tagged_data)
anticipated = ['E Peskin', 'Edward Peskin', 'M E Peskin', 'M Edward Peskin', 'M Peskin',
'Michael E Peskin', 'Michael Edward Peskin', 'Michael Peskin',
'Peskin, E', 'Peskin, Edward', 'Peskin, M',
'Peskin, M E', 'Peskin, M Edward', 'Peskin, Michael',
'Peskin, Michael E', 'Peskin, Michael Edward']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_compound_last(self):
"""BibIndexAuthorTokenizer - tokens for last last, first
Alvarez Gaume, Joachim
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': [], 'raw' : 'Alvarez Gaume, Joachim'}
output = self.get_index_tokens(tagged_data)
anticipated = ['Alvarez Gaume, J', 'Alvarez Gaume, Joachim', 'Alvarez, J', 'Alvarez, Joachim', 'Gaume, J',
'Gaume, Joachim', 'J Alvarez', 'J Alvarez Gaume', 'J Gaume', 'Joachim Alvarez',
'Joachim Alvarez Gaume', 'Joachim Gaume']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_titled(self):
"""BibIndexAuthorTokenizer - tokens for last, first, title
Epstein, Brian, The Fifth Beatle
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle'], 'raw' : 'Epstein, Brian, The Fifth Beatle'}
output = self.get_index_tokens(tagged_data)
anticipated = ['B Epstein', 'B Epstein, The Fifth Beatle', 'Brian Epstein',
'Brian Epstein, The Fifth Beatle', 'Epstein, B', 'Epstein, B, The Fifth Beatle',
'Epstein, Brian', 'Epstein, Brian, The Fifth Beatle']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_wildly_interesting(self):
"""BibIndexAuthorTokenizer - tokens for last last last, first first, title, title
Ibanez y Gracia, Maria Luisa, II, (ed.)
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II', '(ed.)'], 'raw' : 'Ibanez y Gracia, Maria Luisa, II, (ed.)'}
output = self.get_index_tokens(tagged_data)
anticipated = ['Gracia, L', 'Gracia, Luisa', 'Gracia, M', 'Gracia, M L', 'Gracia, M Luisa',
'Gracia, Maria', 'Gracia, Maria L', 'Gracia, Maria Luisa',
'Ibanez y Gracia, L', 'Ibanez y Gracia, L, II',
'Ibanez y Gracia, Luisa', 'Ibanez y Gracia, Luisa, II',
'Ibanez y Gracia, M', 'Ibanez y Gracia, M L', 'Ibanez y Gracia, M L, II',
'Ibanez y Gracia, M Luisa', 'Ibanez y Gracia, M Luisa, II',
'Ibanez y Gracia, M, II',
'Ibanez y Gracia, Maria',
'Ibanez y Gracia, Maria L', 'Ibanez y Gracia, Maria L, II',
'Ibanez y Gracia, Maria Luisa', 'Ibanez y Gracia, Maria Luisa, II',
'Ibanez y Gracia, Maria, II',
'Ibanez, L', 'Ibanez, Luisa',
'Ibanez, M', 'Ibanez, M L', 'Ibanez, M Luisa', 'Ibanez, Maria',
'Ibanez, Maria L', 'Ibanez, Maria Luisa', 'L Gracia', 'L Ibanez',
'L Ibanez y Gracia', 'L Ibanez y Gracia, II', 'Luisa Gracia', 'Luisa Ibanez',
'Luisa Ibanez y Gracia', 'Luisa Ibanez y Gracia, II', 'M Gracia',
'M Ibanez', 'M Ibanez y Gracia', 'M Ibanez y Gracia, II', 'M L Gracia',
'M L Ibanez', 'M L Ibanez y Gracia', 'M L Ibanez y Gracia, II',
'M Luisa Gracia', 'M Luisa Ibanez', 'M Luisa Ibanez y Gracia', 'M Luisa Ibanez y Gracia, II',
'Maria Gracia',
'Maria Ibanez', 'Maria Ibanez y Gracia', 'Maria Ibanez y Gracia, II',
'Maria L Gracia', 'Maria L Ibanez', 'Maria L Ibanez y Gracia', 'Maria L Ibanez y Gracia, II',
'Maria Luisa Gracia', 'Maria Luisa Ibanez', 'Maria Luisa Ibanez y Gracia',
'Maria Luisa Ibanez y Gracia, II']
self.assertEqual(output, anticipated)
def test_bifnt_tokenize_multimiddle_forward(self):
"""BibIndexAuthorTokenizer - tokens for first middle middle last
W K H Panofsky
"""
tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
'lastnames': ['Panofsky'], 'nonlastnames': ['W', 'K', 'H'], 'titles': [], 'raw' : 'W K H Panofsky'}
output = self.get_index_tokens(tagged_data)
anticipated = ['H Panofsky', 'K H Panofsky', 'K Panofsky', 'Panofsky, H', 'Panofsky, K',
'Panofsky, K H', 'Panofsky, W', 'Panofsky, W H', 'Panofsky, W K',
'Panofsky, W K H', 'W H Panofsky',
'W K H Panofsky', 'W K Panofsky', 'W Panofsky']
self.assertEqual(output, anticipated)
def test_tokenize(self):
"""BibIndexAuthorTokenizer - check tokenize_for_phrases()
Ringo Starr
"""
teststr = "Ringo Starr"
output = self.tokenizer.tokenize_for_phrases(teststr)
anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
self.assertEqual(output, anticipated)
class TestExactAuthorTokenizer(InvenioTestCase):
"""Test exact author name tokenizer."""
def setUp(self):
"""setup"""
self.tokenizer = _TOKENIZERS["BibIndexExactAuthorTokenizer"]()
self.tokenize = self.tokenizer.tokenize_for_phrases
def test_exact_author_name_tokenizer_bare(self):
"""BibIndexExactNameTokenizer - bare name"""
self.assertEqual(self.tokenize('John Doe'),
['John Doe'])
def test_exact_author_name_tokenizer_dots(self):
"""BibIndexExactNameTokenizer - name with dots"""
self.assertEqual(self.tokenize('J. Doe'),
['J Doe'])
self.assertEqual(self.tokenize('J.R. Doe'),
['J R Doe'])
self.assertEqual(self.tokenize('J. R. Doe'),
['J R Doe'])
def test_exact_author_name_tokenizer_trailing_dots(self):
"""BibIndexExactNameTokenizer - name with trailing dots"""
self.assertEqual(self.tokenize('Doe, J'),
['Doe, J'])
self.assertEqual(self.tokenize('Doe, J.'),
['Doe, J'])
def test_exact_author_name_tokenizer_hyphens(self):
"""BibIndexExactNameTokenizer - name with hyphens"""
self.assertEqual(self.tokenize('Doe, Jean-Pierre'),
['Doe, Jean Pierre'])
class TestCJKTokenizer(InvenioTestCase):
"""Tests for CJK Tokenizer which splits CJK words into characters and treats
every single character as a word"""
@classmethod
def setUp(self):
self.tokenizer = _TOKENIZERS["BibIndexCJKTokenizer"]()
def test_tokenize_for_words_phrase_galaxy(self):
"""tokenizing phrase: galaxy s4据信"""
phrase = "galaxy s4据信"
result = self.tokenizer.tokenize_for_words(phrase)
self.assertEqual(sorted(['galaxy','s4','据','信']), sorted(result))
def test_tokenize_for_words_phrase_with_special_punctuation(self):
"""tokenizing phrase: 马英九:台湾民"""
phrase = u"马英九:台湾民"
result = self.tokenizer.tokenize_for_words(phrase)
self.assertEqual(sorted(['马','英','九','台','湾','民']), sorted(result))
def test_tokenize_for_words_phrase_with_special_punctuation_two(self):
"""tokenizing phrase: 色的“刀子嘴”"""
phrase = u"色的“刀子嘴”"
result = self.tokenizer.tokenize_for_words(phrase)
self.assertEqual(sorted(['色','的','刀','子','嘴']), sorted(result))
def test_tokenize_for_words_simple_phrase(self):
"""tokenizing phrase: 春眠暁覚"""
self.assertEqual(sorted(self.tokenizer.tokenize_for_words(u'春眠暁覚')), sorted(['春', '眠', '暁', '覚']))
def test_tokenize_for_words_mixed_phrase(self):
"""tokenizing phrase: 春眠暁ABC覚"""
self.assertEqual(sorted(self.tokenizer.tokenize_for_words(u'春眠暁ABC覚')), sorted(['春', '眠', '暁', 'abc', '覚']))
def test_tokenize_for_words_phrase_with_comma(self):
"""tokenizing phrase: 春眠暁, 暁"""
phrase = u"春眠暁, 暁"
self.assertEqual(sorted(self.tokenizer.tokenize_for_words(phrase)), sorted(['春','眠','暁']))
class TestJournalPageTokenizer(InvenioTestCase):
"""Tests for JournalPage Tokenizer"""
@classmethod
def setUp(self):
self.tokenizer = _TOKENIZERS["BibIndexJournalPageTokenizer"]()
def test_tokenize_for_single_page(self):
"""tokenizing for single page"""
test_pairs = [
# simple number
('1', ['1']),
('23', ['23']),
('12312', ['12312']),
# letter + number
('C85', ['C85']),
('L45', ['L45']),
# roman numbers
('VII', ['VII']),
('X', ['X']),
# prefix + simple number
('p.321', ['p.321', '321']),
('pp.321', ['pp.321', '321']),
('cpp.321', ['cpp.321', '321']),
('pag.321', ['pag.321', '321']),
# prefix + non-simple page
('p.A45', ['p.A45', 'A45']),
('pp.C83', ['pp.C83', 'C83']),
('p.V', ['p.V', 'V']),
('pp.IV', ['pp.IV', 'IV']),
]
for phrase, expected_tokens in test_pairs:
result = self.tokenizer.tokenize(phrase)
self.assertEqual(sorted(expected_tokens), sorted(result))
def test_tokenize_for_page_range(self):
"""tokenizing for page range"""
test_pairs = [
# simple number
('1-12', ['1', '1-12']),
('22-22', ['22', '22-22']),
('95-12312', ['95', '95-12312']),
# letter + number
('C85-D55', ['C85', 'C85-D55']),
('L45-L88', ['L45', 'L45-L88']),
# roman numbers
('I-VII', ['I', 'I-VII']),
('VIII-X', ['VIII', 'VIII-X']),
# mixed range
('III-12', ['III', 'III-12']),
('343-A10', ['343', '343-A10']),
('IX-B5', ['IX', 'IX-B5']),
# prefix + simple number
('p.56-123', ['p.56-123', '56-123', '56']),
('pp.56-123', ['pp.56-123', '56-123', '56']),
('cpp.56-123', ['cpp.56-123', '56-123', '56']),
('pag.56-123', ['pag.56-123', '56-123', '56']),
# prefix + non-simple page
('pp.VII-123', ['pp.VII-123', 'VII-123', 'VII']),
]
for phrase, expected_tokens in test_pairs:
result = self.tokenizer.tokenize(phrase)
self.assertEqual(sorted(expected_tokens), sorted(result))
TEST_SUITE = make_test_suite(TestAuthorTokenizerScanning,
TestAuthorTokenizerTokens,
TestExactAuthorTokenizer,
TestCJKTokenizer,
TestJournalPageTokenizer)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
| gpl-2.0 | 6,696,568,995,838,467,000 | 47.344186 | 170 | 0.579565 | false |
SpokesmanReview/django-boundaryservice | boundaryservice/management/commands/loadshapefiles.py | 3 | 10135 | import logging
log = logging.getLogger('boundaries.api.load_shapefiles')
from optparse import make_option
import os, os.path
import sys
from zipfile import ZipFile
from tempfile import mkdtemp
from django.conf import settings
from django.contrib.gis.gdal import (CoordTransform, DataSource, OGRGeometry,
OGRGeomType)
from django.core.management.base import BaseCommand
from django.db import connections, DEFAULT_DB_ALIAS, transaction
from boundaryservice.models import BoundarySet, Boundary
DEFAULT_SHAPEFILES_DIR = getattr(settings, 'SHAPEFILES_DIR', 'data/shapefiles')
GEOMETRY_COLUMN = 'shape'
class Command(BaseCommand):
help = 'Import boundaries described by shapefiles.'
option_list = BaseCommand.option_list + (
make_option('-c', '--clear', action='store_true', dest='clear',
help='Clear all jurisdictions in the DB.'),
make_option('-d', '--data-dir', action='store', dest='data_dir',
default=DEFAULT_SHAPEFILES_DIR,
help='Load shapefiles from this directory'),
make_option('-e', '--except', action='store', dest='except',
default=False,
help='Don\'t load these kinds of Areas, comma-delimited.'),
make_option('-o', '--only', action='store', dest='only',
default=False,
help='Only load these kinds of Areas, comma-delimited.'),
make_option('-u', '--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specify a database to load shape data into.'),
)
def get_version(self):
return '0.1'
def handle(self, *args, **options):
# Load configuration
sys.path.append(options['data_dir'])
from definitions import SHAPEFILES
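        # definitions.py (made importable via the --data-dir sys.path entry
        # above) must define SHAPEFILES, a dict mapping each boundary kind to
        # its loader configuration, as used throughout this command.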
if options['only']:
only = options['only'].upper().split(',')
# TODO: stripping whitespace here because optparse doesn't handle
# it correctly
sources = [s for s in SHAPEFILES
if s.replace(' ', '').upper() in only]
elif options['except']:
exceptions = options['except'].upper().split(',')
# See above
sources = [s for s in SHAPEFILES
if s.replace(' ', '').upper() not in exceptions]
else:
sources = [s for s in SHAPEFILES]
for kind, config in SHAPEFILES.items():
if kind not in sources:
log.info('Skipping %s.' % kind)
continue
log.info('Processing %s.' % kind)
self.load_set(kind, config, options)
@transaction.commit_on_success
def load_set(self, kind, config, options):
log.info('Processing %s.' % kind)
if options['clear']:
bset = None
try:
bset = BoundarySet.objects.get(name=kind)
if bset:
log.info('Clearing old %s.' % kind)
bset.boundaries.all().delete()
bset.delete()
log.info('Loading new %s.' % kind)
except BoundarySet.DoesNotExist:
log.info('No existing boundary set of kind [%s] so nothing to '
'delete' % kind)
path = os.path.join(options['data_dir'], config['file'])
datasources = create_datasources(path)
layer = datasources[0][0]
# Create BoundarySet
log.info("Creating BoundarySet: %s" % kind)
bset = BoundarySet.objects.create(
name=kind,
singular=config['singular'],
kind_first=config['kind_first'],
authority=config['authority'],
domain=config['domain'],
last_updated=config['last_updated'],
href=config['href'],
notes=config['notes'],
count=0,
metadata_fields=layer.fields
)
log.info("Created with slug %s and id %s" % (bset.slug, bset.id))
for datasource in datasources:
log.info("Loading %s from %s" % (kind, datasource.name))
# Assume only a single-layer in shapefile
if datasource.layer_count > 1:
log.warn('%s shapefile [%s] has multiple layers, using first.'
% (datasource.name, kind))
layer = datasource[0]
self.add_boundaries_for_layer(config, layer, bset,
options['database'])
# sync this with reality
bset.count = Boundary.objects.filter(set=bset).count()
bset.save()
log.info('%s count: %i' % (kind, bset.count))
def polygon_to_multipolygon(self, geom):
"""
Convert polygons to multipolygons so all features are homogenous in the
database.
"""
if geom.__class__.__name__ == 'Polygon':
g = OGRGeometry(OGRGeomType('MultiPolygon'))
g.add(geom)
return g
elif geom.__class__.__name__ == 'MultiPolygon':
return geom
else:
raise ValueError('Geom is neither Polygon nor MultiPolygon.')
def add_boundaries_for_layer(self, config, layer, bset, database):
# Get spatial reference system for the postgis geometry field
geometry_field = Boundary._meta.get_field_by_name(GEOMETRY_COLUMN)[0]
SpatialRefSys = connections[database].ops.spatial_ref_sys()
db_srs = SpatialRefSys.objects.using(database).get(
srid=geometry_field.srid).srs
if 'srid' in config and config['srid']:
            layer_srs = SpatialRefSys.objects.using(database).get(
                srid=config['srid']).srs
else:
layer_srs = layer.srs
# Simplification can be configured but default is to create simplified
# geometry field by collapsing points within 1/1000th of a degree.
# For reference, Chicago is at approx. 42 degrees latitude this works
# out to a margin of roughly 80 meters latitude and 112 meters
# longitude for Chicago area.
simplification = config.get('simplification', 0.0001)
# Create a convertor to turn the source data into
transformer = CoordTransform(layer_srs, db_srs)
for feature in layer:
log.debug("Processing boundary %s" % feature)
# Transform the geometry to the correct SRS
geometry = self.polygon_to_multipolygon(feature.geom)
geometry.transform(transformer)
# Preserve topology prevents a shape from ever crossing over
# itself.
simple_geometry = geometry.geos.simplify(simplification,
preserve_topology=True)
# Conversion may force multipolygons back to being polygons
simple_geometry = self.polygon_to_multipolygon(simple_geometry.ogr)
# Extract metadata into a dictionary
metadata = {}
for field in layer.fields:
# Decode string fields using encoding specified in definitions
# config
if config['encoding'] != '':
try:
metadata[field] = feature.get(field).decode(
config['encoding'])
# Only strings will be decoded, get value in normal way if
# int etc.
except AttributeError:
metadata[field] = feature.get(field)
else:
metadata[field] = feature.get(field)
external_id = config['ider'](feature)
feature_name = config['namer'](feature)
# If encoding is specified, decode id and feature name
if config['encoding'] != '':
external_id = external_id.decode(config['encoding'])
feature_name = feature_name.decode(config['encoding'])
if config['kind_first']:
display_name = '%s %s' % (config['singular'], feature_name)
else:
display_name = '%s %s' % (feature_name, config['singular'])
Boundary.objects.create(
set=bset,
kind=config['singular'],
external_id=external_id,
name=feature_name,
display_name=display_name,
metadata=metadata,
shape=geometry.wkt,
simple_shape=simple_geometry.wkt,
centroid=geometry.geos.centroid)
def create_datasources(path):
if path.endswith('.zip'):
path = temp_shapefile_from_zip(path)
if path.endswith('.shp'):
return [DataSource(path)]
# assume it's a directory...
sources = []
for fn in os.listdir(path):
fn = os.path.join(path,fn)
if fn.endswith('.zip'):
fn = temp_shapefile_from_zip(fn)
if fn.endswith('.shp'):
sources.append(DataSource(fn))
return sources
def temp_shapefile_from_zip(zip_path):
"""
Given a path to a ZIP file, unpack it into a temp dir and return the path
to the shapefile that was in there. Doesn't clean up after itself unless
there was an error.
If you want to cleanup later, you can derive the temp dir from this path.
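    (For example, os.path.dirname(shape_path) is the temp dir to remove.)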
"""
log.info("Creating temporary SHP file from %s" % zip_path)
zf = ZipFile(zip_path)
tempdir = mkdtemp()
shape_path = None
# Copy the zipped files to a temporary directory, preserving names.
for name in zf.namelist():
data = zf.read(name)
outfile = os.path.join(tempdir, name)
if name.endswith('.shp'):
shape_path = outfile
        f = open(outfile, 'wb')  # shapefile members are binary data
f.write(data)
f.close()
if shape_path is None:
log.warn("No shapefile, cleaning up")
# Clean up after ourselves.
for file in os.listdir(tempdir):
os.unlink(os.path.join(tempdir, file))
os.rmdir(tempdir)
raise ValueError("No shapefile found in zip")
return shape_path
| mit | -7,678,625,108,405,079,000 | 37.101504 | 79 | 0.565861 | false |
pvt88/scrapy-cloud | cobweb/spiders/search_spider_tbds.py | 2 | 2712 | import scrapy
from datetime import datetime
from cobweb.items import PropertyItem
from cobweb.utilities import extract_number, extract_unit, extract_property_id, strip, extract_listing_type
class SearchSpiderTBDS(scrapy.Spider):
name = 'search_spider_tbds'
def __init__(self, vendor=None, crawl_url=None, type=None, max_depth=2, start_index=1, *args, **kwargs):
super(SearchSpiderTBDS, self).__init__(*args, **kwargs)
self.vendor = vendor
self.crawl_url = crawl_url
self.index = int(start_index)
self.type = type
self.listing_type = extract_listing_type(self.crawl_url)
self.max_depth = int(max_depth)
self.start_urls = [self.vendor + self.crawl_url + "/p" + str(self.index)]
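        # Result pages are addressed as <vendor><crawl_url>/p1, /p2, ...;
        # parse() advances self.index until max_depth or an empty result page.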
def parse(self, response):
if not isinstance(response, scrapy.http.response.html.HtmlResponse):
response = scrapy.http.response.html.HtmlResponse(response.url, body=response.body)
search_results = response.css(u'.col-gr-75per .group-prd li')
for row in search_results:
item = PropertyItem()
item["vendor"] = self.vendor
item["type"] = self.type
item["listing_type"] = self.listing_type
item["created_date"] = datetime.utcnow()
item["last_indexed_date"] = datetime.utcnow()
item["last_crawled_date"] = None
subdomain = row.css(u'.content .title a::attr(href)').extract()
if subdomain:
item["link"] = self.vendor + subdomain[0].strip()
item["property_id"] = extract_property_id(item["link"])
info = row.css(u'.content .info span::text').extract()
if len(info) > 0:
price = info[0].strip()
item["property_price_raw"] = price
item["property_price"] = extract_number(price)
item["property_price_unit"] = extract_unit(price)
if len(info) > 1:
property_size = info[1].strip()
item["property_size_raw"] = property_size
item["property_size"] = extract_number(property_size)
item["property_size_unit"] = extract_unit(property_size)
property_area = row.css(u'.content .fsize-13::text').extract()
if len(property_area) > 1:
item["property_area"] = property_area[1].strip()
item["posted_date"] = None
yield item
if self.index < self.max_depth and len(search_results) > 0:
self.index += 1
next_url = self.vendor + self.crawl_url + "/p" + str(self.index)
yield scrapy.Request(next_url, callback=self.parse)
| gpl-3.0 | -677,495,347,441,557,900 | 40.723077 | 108 | 0.582965 | false |
celadevra/ParaJumper | parajumper/cli/new.py | 1 | 3231 | """module to handle 'new' command."""
import tempfile
import os
import re
from subprocess import call
from clint.textui import prompt, puts, indent, colored
import parajumper.item as item
import parajumper.config as config
import parajumper.db as db
EDITOR = os.environ.get('EDITOR', 'vim')
def dispatch(args):
"""Dispatcher for new command."""
if '-T' in args:
tags = args.value_after('-T').split(',')
else:
tags = None
if not args.flags.has(0):
        newitem(tags)
elif '-t' in args:
newtodo(args.value_after('-t'), tags)
elif '-e' in args:
newevent(args.value_after('-e'), tags)
elif '-n' in args:
newnote(args.value_after('-n'), tags)
def newitem(tags=None):
"""Create new item by calling default $EDITOR, read in user input, and parse content."""
conf = config.Config()
bullets = conf.options['bullets']
puts("Please select a bullet for your note.")
puts("Available bullets are:")
for key in bullets:
with indent(4):
puts("%s : %s" % (key, bullets[key]))
bullet = prompt.query("Your choice: ")
initial_message = """<!-- Please enter your note below. You can use markdown -->
<!-- lines starting with '&' and a space are interpreted as tags -->
<!-- tags are separated by spaces, like this:-->
<!-- & history roman hannibal expected_in_test -->"""
notes = ''
if tags is None:
tags = []
tempf = tempfile.NamedTemporaryFile(suffix='.md', mode='w+', encoding='utf-8', delete=False)
tempf.write(initial_message)
tempf.flush()
try:
call([EDITOR, tempf.name])
except FileNotFoundError:
call(['vi', tempf.name])
tempf.close()
with open(tempf.name) as tempf:
for line in tempf:
if line[:4] != '<!--':
if line[:2] != '& ':
notes += line
else:
tags = tags + [x for x in line[2:-1].split(' ') if x != '']
os.remove(tempf.name)
result = item.Item(bullet=bullet, content=re.sub('\n+$', '\n', notes), tags=tags)
db.save_item(result)
puts("New item saved with id = %s" % colored.green(result.identity))
def _find_bullet(what):
"""Find bullet char corresponding to string."""
conf = config.Config()
bullets = conf.options['bullets']
return list(bullets.keys())[list(bullets.values()).index(what)]
def newtodo(note, tags=None):
"""Quickly (non-interactively) create and store a new todo item."""
result = item.Item(bullet=_find_bullet('todo'), content=note, tags=tags)
db.save_item(result)
puts("New item saved with id = %s" % colored.green(result.identity))
def newevent(note, tags=None):
"""Quickly (non-interactively) create and store a new event item."""
result = item.Item(bullet=_find_bullet('event'), content=note, tags=tags)
db.save_item(result)
puts("New item saved with id = %s" % colored.green(result.identity))
def newnote(note, tags=None):
"""Quickly (non-interactively) create and store a new note item."""
result = item.Item(bullet=_find_bullet('notes'), content=note, tags=tags)
db.save_item(result)
puts("New item saved with id = %s" % colored.green(result.identity))
| gpl-3.0 | 3,437,406,899,622,635,000 | 35.303371 | 96 | 0.618694 | false |
fharenheit/template-spark-app | src/main/python/ml/index_to_string_example.py | 123 | 2014 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import IndexToString, StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("IndexToStringExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame(
[(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
["id", "category"])
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
model = indexer.fit(df)
indexed = model.transform(df)
print("Transformed string column '%s' to indexed column '%s'"
% (indexer.getInputCol(), indexer.getOutputCol()))
indexed.show()
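    # With this sample data StringIndexer orders labels by descending
    # frequency, so the fitted mapping is a -> 0.0, c -> 1.0, b -> 2.0.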
print("StringIndexer will store labels in output column metadata\n")
converter = IndexToString(inputCol="categoryIndex", outputCol="originalCategory")
converted = converter.transform(indexed)
print("Transformed indexed column '%s' back to original string column '%s' using "
"labels in metadata" % (converter.getInputCol(), converter.getOutputCol()))
converted.select("id", "categoryIndex", "originalCategory").show()
# $example off$
spark.stop()
| apache-2.0 | -4,261,917,867,060,582,400 | 36.296296 | 86 | 0.695631 | false |
felix1m/pyspotify | spotify/user.py | 3 | 2597 | from __future__ import unicode_literals
import spotify
from spotify import ffi, lib, serialized, utils
__all__ = [
'User',
]
class User(object):
"""A Spotify user.
You can get users from the session, or you can create a :class:`User`
yourself from a Spotify URI::
>>> session = spotify.Session()
# ...
>>> user = session.get_user('spotify:user:jodal')
>>> user.load().display_name
u'jodal'
"""
def __init__(self, session, uri=None, sp_user=None, add_ref=True):
assert uri or sp_user, 'uri or sp_user is required'
self._session = session
if uri is not None:
user = spotify.Link(self._session, uri=uri).as_user()
if user is None:
raise ValueError(
'Failed to get user from Spotify URI: %r' % uri)
sp_user = user._sp_user
add_ref = True
if add_ref:
lib.sp_user_add_ref(sp_user)
self._sp_user = ffi.gc(sp_user, lib.sp_user_release)
def __repr__(self):
return 'User(%r)' % self.link.uri
@property
@serialized
def canonical_name(self):
"""The user's canonical username."""
return utils.to_unicode(lib.sp_user_canonical_name(self._sp_user))
@property
@serialized
def display_name(self):
"""The user's displayable username."""
return utils.to_unicode(lib.sp_user_display_name(self._sp_user))
@property
def is_loaded(self):
"""Whether the user's data is loaded yet."""
return bool(lib.sp_user_is_loaded(self._sp_user))
def load(self, timeout=None):
"""Block until the user's data is loaded.
After ``timeout`` seconds with no results :exc:`~spotify.Timeout` is
raised. If ``timeout`` is :class:`None` the default timeout is used.
The method returns ``self`` to allow for chaining of calls.
"""
return utils.load(self._session, self, timeout=timeout)
@property
def link(self):
"""A :class:`Link` to the user."""
return spotify.Link(
self._session,
sp_link=lib.sp_link_create_from_user(self._sp_user), add_ref=False)
@property
def starred(self):
"""The :class:`Playlist` of tracks starred by the user."""
return self._session.get_starred(self.canonical_name)
@property
def published_playlists(self):
"""The :class:`PlaylistContainer` of playlists published by the
user."""
return self._session.get_published_playlists(self.canonical_name)
| apache-2.0 | 3,371,609,170,819,571,700 | 28.179775 | 79 | 0.589911 | false |
Becksteinlab/MDPOW | mdpow/version.py | 1 | 1579 | # POW package __init__.py
# Copyright (c) 2010 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""\
MDPOW version information
=========================
MDPOW uses `semantic versioning`_ with the release number consisting
of a triplet *MAJOR.MINOR.PATCH*. *PATCH* releases are bug fixes or
updates to docs or meta data only and do not introduce new features or
change the API. Within a *MAJOR* release, the user API is stable
except during the development cycles with MAJOR = 0 where the API may
also change (rarely) between MINOR releases. *MINOR* releases can
introduce new functionality or deprecate old ones.
Development versions will have the suffix *-dev* after the version
string.
.. _semantic versioning: http://semver.org
Accessing release information
-----------------------------
User code should use :func:`get_version` or :func:`get_version_tuple`.
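For example, with the development version defined below::
    >>> from mdpow.version import get_version, get_version_tuple
    >>> get_version()
    '0.7.0-dev'
    >>> get_version_tuple()
    ('0', '7', '0-dev')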
.. autodata:: VERSION
.. autofunction:: get_version
.. autofunction:: get_version_tuple
"""
#: Package version; this is the only place where it is set.
VERSION = 0,7,0
#: Set to ``True`` for a release. If set to ``False`` then the patch level
#: will have the suffix "-dev".
RELEASE = False
if not RELEASE:
VERSION = VERSION[:2] + (str(VERSION[2]) + '-dev',)
def get_version():
"""Return current package version as a string."""
return ".".join(map(str,VERSION))
def get_version_tuple():
"""Return current package version as a tuple (*MAJOR*, *MINOR*, *PATCHLEVEL*)."""
return tuple(map(str,VERSION))
| gpl-3.0 | -4,106,233,672,329,393,000 | 31.895833 | 85 | 0.69791 | false |
mortada/numpy | numpy/core/tests/test_umath_complex.py | 70 | 19916 | from __future__ import division, absolute_import, print_function
import sys
import platform
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
# TODO: FPU exceptions
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
with np.errstate(all='ignore'):
functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
or (np.log(complex(np.NZERO, 0)).imag != np.pi))
# TODO: replace with a check on whether platform-provided C99 funcs are used
skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
def platform_skip(func):
return dec.skipif(skip_complex_tests,
"Numpy is using complex functions (e.g. sqrt) provided by your"
"platform's C library. However, they do not seem to behave according"
"to C99 -- so C99 tests are skipped.")(func)
class TestCexp(object):
def test_simple(self):
check = check_complex_value
f = np.exp
yield check, f, 1, 0, np.exp(1), 0, False
yield check, f, 0, 1, np.cos(1), np.sin(1), False
ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
yield check, f, 1, 1, ref.real, ref.imag, False
@platform_skip
def test_special_values(self):
# C99: Section G 6.3.1
check = check_complex_value
f = np.exp
# cexp(+-0 + 0i) is 1 + 0i
yield check, f, np.PZERO, 0, 1, 0, False
yield check, f, np.NZERO, 0, 1, 0, False
# cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
# exception
yield check, f, 1, np.inf, np.nan, np.nan
yield check, f, -1, np.inf, np.nan, np.nan
yield check, f, 0, np.inf, np.nan, np.nan
# cexp(inf + 0i) is inf + 0i
yield check, f, np.inf, 0, np.inf, 0
# cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
ref = np.complex(np.cos(1.), np.sin(1.))
yield check, f, -np.inf, 1, np.PZERO, np.PZERO
ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO
# cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
ref = np.complex(np.cos(1.), np.sin(1.))
yield check, f, np.inf, 1, np.inf, np.inf
ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform %(z.real, z.imag))
yield _check_ninf_inf, None
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
yield _check_inf_inf, None
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
yield _check_ninf_nan, None
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
yield _check_inf_nan, None
# cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
# ex)
yield check, f, np.nan, 1, np.nan, np.nan
yield check, f, np.nan, -1, np.nan, np.nan
yield check, f, np.nan, np.inf, np.nan, np.nan
yield check, f, np.nan, -np.inf, np.nan, np.nan
# cexp(nan + nani) is nan + nani
yield check, f, np.nan, np.nan, np.nan, np.nan
@dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations")
def test_special_values2(self):
# XXX: most implementations get it wrong here (including glibc <= 2.10)
# cexp(nan + 0i) is nan + 0i
        yield check_complex_value, np.exp, np.nan, 0, np.nan, 0
class TestClog(TestCase):
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
y = np.log(x)
for i in range(len(x)):
assert_almost_equal(y[i], y_r[i])
@platform_skip
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_special_values(self):
xl = []
yl = []
# From C99 std (Sec 6.3.2)
# XXX: check exceptions raised
# --- raise for invalid fails.
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
x = np.array([np.NZERO], dtype=np.complex)
y = np.complex(-np.inf, np.pi)
self.assertRaises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
x = np.array([0], dtype=np.complex)
y = np.complex(-np.inf, 0)
self.assertRaises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(x + i inf) returns +inf + i pi/2, for finite x.
x = np.array([complex(1, np.inf)], dtype=np.complex)
y = np.complex(np.inf, 0.5 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
x = np.array([complex(-1, np.inf)], dtype=np.complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
# 'invalid' floating- point exception, for finite x.
with np.errstate(invalid='raise'):
x = np.array([complex(1., np.nan)], dtype=np.complex)
y = np.complex(np.nan, np.nan)
#self.assertRaises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
with np.errstate(invalid='raise'):
x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
#self.assertRaises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
x = np.array([-np.inf + 1j], dtype=np.complex)
y = np.complex(np.inf, np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
x = np.array([np.inf + 1j], dtype=np.complex)
y = np.complex(np.inf, 0)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(- inf + i inf) returns +inf + i3pi /4.
x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
y = np.complex(np.inf, 0.75 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + i inf) returns +inf + ipi /4.
x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
y = np.complex(np.inf, 0.25 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+/- inf + iNaN) returns +inf + iNaN.
x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
y = np.complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iy) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite y.
x = np.array([complex(np.nan, 1)], dtype=np.complex)
y = np.complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + i inf) returns +inf + iNaN.
x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
y = np.complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iNaN) returns NaN + iNaN.
x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
y = np.complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(conj(z)) = conj(clog(z)).
xa = np.array(xl, dtype=np.complex)
ya = np.array(yl, dtype=np.complex)
with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i])))
class TestCsqrt(object):
def test_simple(self):
# sqrt(1)
yield check_complex_value, np.sqrt, 1, 0, 1, 0
# sqrt(1i)
yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False
# sqrt(-1)
yield check_complex_value, np.sqrt, -1, 0, 0, 1
def test_simple_conjugate(self):
ref = np.conj(np.sqrt(np.complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
yield check_complex_value, f, 1, 1, ref.real, ref.imag, False
#def test_branch_cut(self):
# _check_branch_cut(f, -1, 0, 1, -1)
@platform_skip
def test_special_values(self):
check = check_complex_value
f = np.sqrt
# C99: Sec G 6.4.2
x, y = [], []
# csqrt(+-0 + 0i) is 0 + 0i
yield check, f, np.PZERO, 0, 0, 0
yield check, f, np.NZERO, 0, 0, 0
# csqrt(x + infi) is inf + infi for any x (including NaN)
yield check, f, 1, np.inf, np.inf, np.inf
yield check, f, -1, np.inf, np.inf, np.inf
yield check, f, np.PZERO, np.inf, np.inf, np.inf
yield check, f, np.NZERO, np.inf, np.inf, np.inf
yield check, f, np.inf, np.inf, np.inf, np.inf
yield check, f, -np.inf, np.inf, np.inf, np.inf
yield check, f, -np.nan, np.inf, np.inf, np.inf
# csqrt(x + nani) is nan + nani for any finite x
yield check, f, 1, np.nan, np.nan, np.nan
yield check, f, -1, np.nan, np.nan, np.nan
yield check, f, 0, np.nan, np.nan, np.nan
# csqrt(-inf + yi) is +0 + infi for any finite y > 0
yield check, f, -np.inf, 1, np.PZERO, np.inf
# csqrt(inf + yi) is +inf + 0i for any finite y > 0
yield check, f, np.inf, 1, np.inf, np.PZERO
# csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
def _check_ninf_nan(dummy):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
#Fixme: ugly workaround for isinf bug.
with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
raise AssertionError(msgform % (z.real, z.imag))
yield _check_ninf_nan, None
# csqrt(+inf + nani) is inf + nani
yield check, f, np.inf, np.nan, np.inf, np.nan
# csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
# + nani)
yield check, f, np.nan, 0, np.nan, np.nan
yield check, f, np.nan, 1, np.nan, np.nan
yield check, f, np.nan, np.nan, np.nan, np.nan
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
class TestCpow(TestCase):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = x ** 2
y = np.power(x, 2)
for i in range(len(x)):
assert_almost_equal(y[i], y_r[i])
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
# Compute the values for complex type in python
p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
# Substitute a result allowed by C99 standard
p_r[4] = complex(np.inf, np.nan)
# Do the same with numpy complex scalars
n_r = [x[i] ** y[i] for i in lx]
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
def test_array(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
# Compute the values for complex type in python
p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
# Substitute a result allowed by C99 standard
p_r[4] = complex(np.inf, np.nan)
# Do the same with numpy arrays
n_r = x ** y
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
class TestCabs(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
y = np.abs(x)
for i in range(len(x)):
assert_almost_equal(y[i], y_r[i])
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
x = np.array([1+0j], dtype=np.complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(1, np.NZERO)], dtype=np.complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
x, y = [], []
# cabs(+-nan + nani) returns nan
x.append(np.nan)
y.append(np.nan)
yield check_real_value, np.abs, np.nan, np.nan, np.nan
x.append(np.nan)
y.append(-np.nan)
yield check_real_value, np.abs, -np.nan, np.nan, np.nan
# According to C99 standard, if exactly one of the real/part is inf and
# the other nan, then cabs should return inf
x.append(np.inf)
y.append(np.nan)
yield check_real_value, np.abs, np.inf, np.nan, np.inf
x.append(-np.inf)
y.append(np.nan)
yield check_real_value, np.abs, -np.inf, np.nan, np.inf
# cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
def f(a):
return np.abs(np.conj(a))
def g(a, b):
return np.abs(np.complex(a, b))
xa = np.array(x, dtype=np.complex)
for i in range(len(xa)):
ref = g(x[i], y[i])
yield check_real_value, f, x[i], y[i], ref
class TestCarg(object):
def test_simple(self):
check_real_value(ncu._arg, 1, 0, 0, False)
check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
@dec.knownfailureif(True,
"Complex arithmetic with signed zero is buggy on most implementation")
def test_zero(self):
# carg(-0 +- 0i) returns +- pi
yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False
yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False
# carg(+0 +- 0i) returns +- 0
yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO
yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO
# carg(x +- 0i) returns +- 0 for x > 0
yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False
yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False
# carg(x +- 0i) returns +- pi for x < 0
yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False
yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False
# carg(+- 0 + yi) returns pi/2 for y > 0
yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False
yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False
# carg(+- 0 + yi) returns -pi/2 for y < 0
        yield check_real_value, ncu._arg, np.PZERO, -1, -0.5 * np.pi, False
yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False
#def test_branch_cuts(self):
# _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
def test_special_values(self):
# carg(-np.inf +- yi) returns +-pi for finite y > 0
yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False
yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False
# carg(np.inf +- yi) returns +-0 for finite y > 0
yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False
yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False
# carg(x +- np.infi) returns +-pi/2 for finite x
yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False
yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False
# carg(-np.inf +- np.infi) returns +-3pi/4
yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False
yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False
# carg(np.inf +- np.infi) returns +-pi/4
yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False
yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False
# carg(x + yi) returns np.nan if x or y is nan
yield check_real_value, ncu._arg, np.nan, 0, np.nan, False
yield check_real_value, ncu._arg, 0, np.nan, np.nan, False
yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False
yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False
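# Shared helpers: evaluate f on a one-element complex array and compare the
# result with the expected value, exactly (assert_equal) or approximately
# (assert_almost_equal) depending on the `exact` flag.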
def check_real_value(f, x1, y1, x, exact=True):
z1 = np.array([complex(x1, y1)])
if exact:
assert_equal(f(z1), x)
else:
assert_almost_equal(f(z1), x)
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
z2 = np.complex(x2, y2)
with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 8,271,814,840,366,816,000 | 36.087523 | 87 | 0.544838 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/real_world_impact/nsfw_urls.py | 113 | 2143 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NSFW urls in the Alexa top 2000 sites."""
nsfw_urls = set([
"http://xhamster.com/",
"http://xvideos.com/",
"http://livejasmin.com/",
"http://pornhub.com/",
"http://redtube.com/",
"http://youporn.com/",
"http://xnxx.com/",
"http://tube8.com/",
"http://youjizz.com/",
"http://adultfriendfinder.com/",
"http://hardsextube.com/",
"http://yourlust.com/",
"http://drtuber.com/",
"http://beeg.com/",
"http://largeporntube.com/",
"http://nuvid.com/",
"http://bravotube.net/",
"http://spankwire.com/",
"http://discreethearts.com/",
"http://keezmovies.com/",
"http://xtube.com/",
"http://alphaporno.com/",
"http://4tube.com/",
"http://nudevista.com/",
"http://porntube.com/",
"http://xhamstercams.com/",
"http://porn.com/",
"http://video-one.com/",
"http://perfectgirls.net/",
"http://slutload.com/",
"http://sunporno.com/",
"http://tnaflix.com/",
"http://pornerbros.com/",
"http://h2porn.com/",
"http://adult-empire.com/",
"http://pornhublive.com/",
"http://sexitnow.com/",
"http://pornsharia.com/",
"http://freeones.com/",
"http://tubegalore.com/",
"http://xvideos.jp/",
"http://brazzers.com/",
"http://fapdu.com/",
"http://pornoxo.com/",
"http://extremetube.com/",
"http://hot-sex-tube.com/",
"http://xhamsterhq.com/",
"http://18andabused.com/",
"http://tubepleasure.com/",
"http://18schoolgirlz.com/",
"http://chaturbate.com/",
"http://motherless.com/",
"http://yobt.com/",
"http://empflix.com/",
"http://hellporno.com/",
"http://ashemaletube.com/",
"http://watchmygf.com/",
"http://redtubelive.com/",
"http://met-art.com/",
"http://gonzoxxxmovies.com/",
"http://shufuni.com/",
"http://vid2c.com/",
"http://dojki.com/",
"http://cerdas.com/",
"http://overthumbs.com/",
"http://xvideoslive.com/",
"http://playboy.com/",
"http://caribbeancom.com/",
"http://tubewolf.com/",
"http://xmatch.com/",
"http://ixxx.com/",
"http://nymphdate.com/",
]) | mit | -5,532,768,227,242,243,000 | 26.139241 | 72 | 0.59916 | false |
RUNDSP/luigi-swf | luigi_swf/examples/task_basic.py | 1 | 1677 | #!/usr/bin/env python
import datetime
import logging
import os.path
from subprocess import call
import luigi
from luigi_swf import cw, LuigiSwfExecutor
logger = logging.getLogger(__name__)
seconds = 1.
minutes = 60. * seconds
hours = 60. * minutes
class DemoBasicTask(luigi.Task):
# Workaround for when the task is in the same file you're executing
__module__ = 'luigi_swf.examples.task_basic'
dt = luigi.DateParameter()
hour = luigi.IntParameter()
# Default values
swf_task_list = 'default'
swf_retries = 0
swf_start_to_close_timeout = None # in seconds
swf_heartbeat_timeout = None # in seconds
# Use luigi_swf.cw.cw_update_workflows() to sync these to CloudWatch.
swf_cw_alarms = [
        cw.TaskFailedAlarm(['arn:aws:sns:us-east-1:1234567:alert_ops']),
cw.TaskHasNotCompletedAlarm(
['arn:aws:sns:us-east-1:1234567:alert_ops'], period=2.5 * hours),
]
def output(self):
path = os.path.expanduser('~/luigi-swf-demo-basic-complete')
return luigi.LocalTarget(path)
def run(self):
logger.info('hi | %s', self.dt)
call(['touch', self.output().path])
class DemoBasicWorkflow(luigi.WrapperTask):
dt = luigi.DateParameter()
hour = luigi.IntParameter()
def requires(self):
return DemoBasicTask(dt=self.dt, hour=self.hour)
if __name__ == '__main__':
task = DemoBasicWorkflow(dt=datetime.datetime(2000, 1, 1), hour=0)
domain = 'development'
version = 'unspecified'
ex = LuigiSwfExecutor(domain, version, task)
ex.register()
ex.execute()
| apache-2.0 | 7,685,299,526,347,799,000 | 24.029851 | 77 | 0.654741 | false |
zero-rp/miniblink49 | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py | 34 | 7369 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import re
from webkitpy.layout_tests.models import test_expectations
_log = logging.getLogger(__name__)
class LayoutTestFinder(object):
def __init__(self, port, options):
self._port = port
self._options = options
self._filesystem = self._port.host.filesystem
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
def find_tests(self, options, args):
paths = self._strip_test_dir_prefixes(args)
if options.test_list:
paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
test_files = self._port.tests(paths)
return (paths, test_files)
def _strip_test_dir_prefixes(self, paths):
return [self._strip_test_dir_prefix(path) for path in paths if path]
def _strip_test_dir_prefix(self, path):
# Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
# the filesystem uses '\\' as a directory separator.
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
return path
def _read_test_names_from_file(self, filenames, test_path_separator):
fs = self._filesystem
tests = []
for filename in filenames:
try:
if test_path_separator != fs.sep:
filename = filename.replace(test_path_separator, fs.sep)
file_contents = fs.read_text_file(filename).split('\n')
for line in file_contents:
line = self._strip_comments(line)
if line:
tests.append(line)
except IOError, e:
if e.errno == errno.ENOENT:
_log.critical('')
                    _log.critical('--test-list file "%s" not found' % filename)
raise
return tests
@staticmethod
def _strip_comments(line):
commentIndex = line.find('//')
        if commentIndex == -1:
commentIndex = len(line)
line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
if line == '':
return None
else:
return line
def skip_tests(self, paths, all_tests_list, expectations, http_tests):
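        # --skipped=always honours the SKIP expectations as-is, 'only' inverts
        # the set (run only the skipped tests), 'ignore' skips nothing, and
        # the default still runs any skipped test named explicitly on the
        # command line.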
all_tests = set(all_tests_list)
tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
if self._options.skip_failing_tests:
tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
if self._options.skipped == 'only':
tests_to_skip = all_tests - tests_to_skip
elif self._options.skipped == 'ignore':
tests_to_skip = set()
elif self._options.skipped != 'always':
# make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
tests_to_skip -= set(paths)
return tests_to_skip
def split_into_chunks(self, test_names):
"""split into a list to run and a set to skip, based on --run-chunk and --run-part."""
if not self._options.run_chunk and not self._options.run_part:
return test_names, set()
# If the user specifies they just want to run a subset of the tests,
# just grab a subset of the non-skipped tests.
chunk_value = self._options.run_chunk or self._options.run_part
try:
(chunk_num, chunk_len) = chunk_value.split(":")
chunk_num = int(chunk_num)
assert(chunk_num >= 0)
test_size = int(chunk_len)
assert(test_size > 0)
except AssertionError:
_log.critical("invalid chunk '%s'" % chunk_value)
return (None, None)
# Get the number of tests
num_tests = len(test_names)
# Get the start offset of the slice.
if self._options.run_chunk:
chunk_len = test_size
# In this case chunk_num can be really large. We need
# to make the slave fit in the current number of tests.
slice_start = (chunk_num * chunk_len) % num_tests
else:
# Validate the data.
assert(test_size <= num_tests)
assert(chunk_num <= test_size)
# To count the chunk_len, and make sure we don't skip
# some tests, we round to the next value that fits exactly
# all the parts.
rounded_tests = num_tests
if rounded_tests % test_size != 0:
rounded_tests = (num_tests + test_size - (num_tests % test_size))
chunk_len = rounded_tests / test_size
slice_start = chunk_len * (chunk_num - 1)
# It does not mind if we go over test_size.
# Get the end offset of the slice.
slice_end = min(num_tests, slice_start + chunk_len)
tests_to_run = test_names[slice_start:slice_end]
_log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
# If we reached the end and we don't have enough tests, we run some
# from the beginning.
if slice_end - slice_start < chunk_len:
extra = chunk_len - (slice_end - slice_start)
_log.debug(' last chunk is partial, appending [0:%d]' % extra)
tests_to_run.extend(test_names[0:extra])
return (tests_to_run, set(test_names) - set(tests_to_run))
| apache-2.0 | 145,791,328,630,913,120 | 42.093567 | 134 | 0.627494 | false |