repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 19 values) | size (stringlengths 4 to 7) | content (stringlengths 721 to 1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
xuru/pyvisdk | pyvisdk/do/task_reason_alarm.py | 1 | 1024 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def TaskReasonAlarm(vim, *args, **kwargs):
'''Indicates that the task was queued by an alarm.'''
obj = vim.client.factory.create('ns0:TaskReasonAlarm')
# do some validation checking...
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'alarm', 'alarmName', 'entity', 'entityName' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| mit | 4,373,901,051,778,541,000 | 30.060606 | 124 | 0.589844 | false |
vsfs/vsfs-bench | vsbench/slurm.py | 1 | 10482 | #!/usr/bin/env python
#SBATCH --time=12:00:00
#SBATCH --mem-per-cpu=1024
#SBATCH --partition=guest
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
#SBATCH --cpus-per-task=1
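# (The #SBATCH lines above are directives parsed by SLURM's sbatch command
#  when this script is submitted as a batch job; they are treated as plain
#  comments when the script is run directly.)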
#
# Author: Lei Xu <[email protected]>
#
# TODO(eddyxu): generalize this to all drivers
from __future__ import print_function
from subprocess import check_output
from fabric.api import lcd, local, settings
from fabric.colors import yellow, red
import argparse
import importlib
import os
import sys
import time
sys.path.append('..')
SCRIPT_DIR = os.path.abspath(os.curdir)
VSBENCH = os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'bin/vsbench'))
FABFILE = os.path.join(SCRIPT_DIR, 'fabfile.py')
fabfile = None # module
def prepare_cluster(driver, num_shard):
"""
"""
print(yellow('Preparing cluster..'), file=sys.stderr)
with settings(warn_only=True), lcd(os.path.join(SCRIPT_DIR, driver)):
if driver == 'vsfs':
#local('fab start:%d,%d' % (num_shard, num_shard))
local('fab start:%d,%d' % (2, num_shard))
else:
local('fab start:%d' % num_shard)
def destory_cluster(driver):
print(red('Shutting down the cluster.'), file=sys.stderr)
with settings(warn_only=True), lcd(os.path.join(SCRIPT_DIR, driver)):
local('fab stop')
def populate_namesapce(driver, nfiles, nindices):
"""Populate the namespace with 'nfiles' files and 'nindices' index.
@param driver the name of file search driver. (vsfs/mysql/voltdb..)
@param nfiles number of files in the namespace.
@param nindices number of indexes in the namespace.
"""
print(yellow('Populating namespace...'), file=sys.stderr)
print(yellow('Importing files...'), file=sys.stderr)
check_output('srun -n 10 %s -driver %s -%s_host %s '
'-op import -records_per_index %d' %
(VSBENCH, driver, driver, fabfile.env['head'], nfiles / 10),
shell=True)
print(yellow('Building %s indices...' % nindices),
file=sys.stderr)
check_output('%s -driver %s -%s_host %s -op create_indices '
'-num_indices %d' %
(VSBENCH, driver, driver, fabfile.env['head'], nindices),
shell=True)
def parallel_run(params, mpi=False, debug=False):
"""Parallelly running clients
"""
if mpi:
run_cmd = 'mpirun --mca orte_base_help_aggregate 0 '
else:
run_cmd = 'srun '
run_cmd += ' %s -driver %s -%s_host %s ' % \
(VSBENCH, args.driver, args.driver, fabfile.env['head'])
run_cmd += params
if mpi:
run_cmd += ' -mpi'
if debug:
print(run_cmd, file=sys.stderr)
print(run_cmd)
check_output(run_cmd, shell=True)
def create_indices(args, num_indices):
    print(yellow('Initializing DB and creating indices...'), file=sys.stderr)
driver = args.driver
cmd = '%s -driver %s -%s_host %s -op create_indices -num_indices %d' % \
(VSBENCH, driver, driver, fabfile.env['head'], num_indices)
if driver == 'mysql':
cmd += ' -mysql_schema single'
check_output(cmd, shell=True)
def test_index(args):
"""Test indexing performance
"""
num_indices = 63 # Max indices supported in mongodb
def run_test(args):
"""
"""
params = '-op insert -num_indices 2 -records_per_index %d' % \
(args.total / num_indices)
if args.driver == 'mysql':
params += ' -cal_prefix -mysql_schema single'
parallel_run(params, args.mpi)
driver = args.driver
args.output.write("# Shard\tTotal\tLatency\n")
destory_cluster(args.driver)
time.sleep(3)
shard_confs = map(int, args.shards.split(','))
for shard in shard_confs:
prepare_cluster(args.driver, shard)
time.sleep(3)
if driver != 'mongodb':
            # MongoDB's indices are created when starting the cluster. Calling
            # "vsbench -op create_indices" crashes the benchmark. Need to
            # investigate later.
create_indices(args, num_indices)
print('Importing files.', file=sys.stderr)
params = '-op import -records_per_index %d' % \
(args.total / num_indices)
parallel_run(params)
print('Run insert for %d shard' % shard, file=sys.stderr)
start_time = time.time()
run_test(args)
end_time = time.time()
args.output.write('%d %d %0.2f\n' %
(shard, args.total, end_time - start_time))
args.output.flush()
destory_cluster(args.driver)
def test_open_index(args):
"""Use open loop to test the latency of VSFS.
"""
#ntasks = int(os.environ.get('SLURM_NTASKS'))
driver = 'vsfs'
prepare_cluster(driver, 16)
time.sleep(5)
populate_namesapce(args.driver, args.total, 240)
return
print(yellow('Run insert in open loop'), file=sys.stderr)
params = '%s -driver %s -%s_host %s -op insert ' \
'-num_indices 2 -records_per_index %d -batch_size 1 -latency' % \
(VSBENCH, driver, driver, fabfile.env['head'], args.total)
parallel_run(params)
def test_search(args):
args.output.write("# Shard Latency\n")
num_files = args.nfiles
shard_confs = map(int, args.shards.split(','))
destory_cluster(args.driver)
time.sleep(3)
for shard in shard_confs:
prepare_cluster(args.driver, shard)
time.sleep(10)
num_indices = 100
populate_namesapce(args.driver, num_files, num_indices)
check_output('srun -n 10 %s -driver %s -%s_host %s '
'-op insert -num_indices 10 -records_per_index %s' %
(VSBENCH, args.driver, args.driver, fabfile.env['head'],
num_files),
shell=True)
start_time = time.time()
search_cmd = '%s -driver %s -%s_host %s -op search -query "%s"' % \
(VSBENCH, args.driver, args.driver, fabfile.env['head'],
"/foo/bar?index0>10000&index0<20000")
print(search_cmd)
check_output(search_cmd, shell=True)
end_time = time.time()
args.output.write('%d %0.2f\n' % (shard, end_time - start_time))
args.output.flush()
destory_cluster(args.driver)
args.output.close()
def test_open_search(args):
"""Test search latency in open loop.
"""
def run_test(args):
"""
"""
parallel_run('-op open_search -num_indices 20', args.mpi)
args.output.write("# Shard Latency\n")
shard_confs = map(int, args.shards.split(','))
    destory_cluster(args.driver)
time.sleep(3)
for shard in shard_confs:
prepare_cluster(args.driver, shard)
time.sleep(3)
populate_namesapce(args.driver, 100000, 100)
check_output('srun -n 10 %s -driver %s -%s_host %s '
'-op insert -num_indices 2 -records_per_index 50000' %
(VSBENCH, args.driver, args.driver, fabfile.env['head']),
shell=True)
start_time = time.time()
run_test(args)
end_time = time.time()
args.output.write('%d %0.2f\n' % (shard, end_time - start_time))
args.output.flush()
        destory_cluster(args.driver)
args.output.close()
def avail_drivers():
drivers = []
for subdir in os.listdir(SCRIPT_DIR):
if os.path.exists(os.path.join(subdir, 'fabfile.py')):
drivers.append(subdir)
return drivers
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage='sbatch -n NUM_CLIENTS %(prog)s [options] TEST',
description='run VSFS benchmark on sandhills (SLURM).',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--driver', metavar='NAME', default='mongodb',
choices=avail_drivers(),
help='available drivers: %(choices)s')
parser.add_argument(
'-s', '--shards', metavar='N0,N1,N2..',
default=','.join(map(str, range(2, 21, 2))),
help='Comma separated string of the numbers of shared servers to '
'test against')
parser.add_argument('--mpi', action="store_true", default=False,
help='use MPI to synchronize clients.')
parser.add_argument('-o', '--output', type=argparse.FileType('w'),
default='slurm_results.txt', metavar='FILE',
help='set output file')
subparsers = parser.add_subparsers(help='Available tests')
parser_index = subparsers.add_parser(
'index', help='test indexing performance',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_index.add_argument(
'-t', '--total', type=int, default=10**7, metavar='NUM',
help='Total number of index records.')
parser_index.add_argument(
'-i', '--index', type=int, default=63, metavar='NUM',
help='Number of indices')
parser_index.add_argument('--id')
parser_index.set_defaults(func=test_index)
parser_open_index = subparsers.add_parser(
'open_index', help='test indexing in open loop to measure latency',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_open_index.add_argument(
'-b', '--batch', type=int, default=1, metavar='NUM',
help='set the batch size')
parser_open_index.add_argument(
'-t', '--total', type=int, default=10**4, metavar='NUM',
help='Set the number of records to index.'
)
parser_open_index.set_defaults(func=test_open_index)
parser_search = subparsers.add_parser(
'search', help='test searching performance',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_search.add_argument(
'-n', '--nfiles', type=int, default=100000, metavar='NUM',
help='set number of files.')
parser_search.set_defaults(func=test_search)
parser_open_search = subparsers.add_parser(
'open_search', help='test searching in open loop to measure latency.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_open_search.add_argument(
'-n', '--nfiles', type=int, default=100000, metavar='NUM',
help='set number of files.')
parser_open_search.set_defaults(func=test_open_search)
args = parser.parse_args()
module_name = 'vsbench.%s.fabfile' % args.driver
fabfile = importlib.import_module(module_name)
args.func(args)
| apache-2.0 | 4,452,980,237,569,532,400 | 35.778947 | 78 | 0.60103 | false |
dgisser/mapio | calendar_sample.py | 1 | 1577 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Calendar API.
Command-line application that retrieves the list of the user's calendars."""
import sys
from oauth2client import client
from googleapiclient import sample_tools
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'calendar', 'v3', __doc__, __file__,
scope='https://www.googleapis.com/auth/calendar.readonly')
try:
page_token = None
while True:
calendar_list = service.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
print calendar_list_entry['summary']
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
except client.AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run '
           'the application to re-authorize.')
if __name__ == '__main__':
main(sys.argv)
| mit | -1,054,553,263,972,978,800 | 32.553191 | 81 | 0.710843 | false |
kyleabeauchamp/EnsemblePaper | code/model_building/benchmark_ALA.py | 1 | 8769 | import experiment_loader
import ALA3
import numpy as np
from fitensemble import lvbp
from fitensemble.utils import validate_pandas_columns
num_threads = 2
num_samples = 20000 # Generate 20,000 MCMC samples
thin = 25 # Subsample (i.e. thin) the MCMC traces by 25X to ensure independent samples
burn = 5000 # Discard the first 5000 samples as "burn-in"
ff = "amber99"
prior = "maxent"
regularization_strength = 3.0
directory = "%s/%s" % (ALA3.data_dir , ff)
out_dir = directory + "/cross_val/"
predictions, measurements, uncertainties = experiment_loader.load(directory)
validate_pandas_columns(predictions, measurements, uncertainties)
lvbp.ne.set_num_threads(num_threads)
lvbp_model = lvbp.MaxEnt_LVBP(predictions.values, measurements.values, uncertainties.values, regularization_strength)
%prun lvbp_model.sample(num_samples, thin=thin, burn=burn)
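# (%prun is an IPython magic, so the profiling call above is meant to be run
#  from an IPython session rather than as a plain Python script.)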
"""
2,2 with optimized code:
2561713 function calls (2278913 primitive calls) in 88.531 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
80700 37.819 0.000 37.819 0.000 {method 'dot' of 'numpy.ndarray' objects}
20350 17.375 0.001 32.767 0.002 lvbp.py:47(get_populations_from_q)
100702 12.116 0.000 12.116 0.000 {method 'reduce' of 'numpy.ufunc' objects}
20350 10.404 0.001 11.030 0.001 necompiler.py:667(evaluate)
20350 3.474 0.000 28.250 0.001 lvbp.py:23(get_q)
121301/80601 0.669 0.000 76.896 0.001 PyMCObjects.py:434(get_value)
241301/120601 0.565 0.000 86.129 0.001 {method 'get' of 'pymc.LazyFunction.LazyFunction' objects}
20000 0.557 0.000 1.259 0.000 ensemble_fitter.py:36(get_chi2)
20000 0.416 0.000 0.711 0.000 {method 'normal' of 'mtrand.RandomState' objects}
20350 0.379 0.000 5.218 0.000 _methods.py:42(_mean)
121051/60351 0.326 0.000 76.891 0.001 {method 'run' of 'pymc.Container_values.DCValue' objects}
20000 0.325 0.000 86.944 0.004 StepMethods.py:434(step)
20350 0.302 0.000 0.340 0.000 necompiler.py:462(getContext)
20350 0.251 0.000 0.270 0.000 _methods.py:32(_count_reduce_items)
20000 0.246 0.000 7.253 0.000 lvbp.py:211(logp_prior)
121059 0.245 0.000 0.245 0.000 {numpy.core.multiarray.array}
20000 0.232 0.000 0.702 0.000 linalg.py:1868(norm)
80000 0.220 0.000 84.867 0.001 PyMCObjects.py:293(get_logp)
20000 0.208 0.000 1.147 0.000 StepMethods.py:516(propose)
1,1
2563078 function calls (2280230 primitive calls) in 102.982 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
80712 45.111 0.001 45.111 0.001 {method 'dot' of 'numpy.ndarray' objects}
20356 17.069 0.001 37.650 0.002 lvbp.py:47(get_populations_from_q)
20356 15.933 0.001 16.515 0.001 necompiler.py:667(evaluate)
100714 11.504 0.000 11.504 0.000 {method 'reduce' of 'numpy.ufunc' objects}
20356 6.466 0.000 33.979 0.002 lvbp.py:23(get_q)
121313/80601 0.641 0.000 90.150 0.001 PyMCObjects.py:434(get_value)
20000 0.532 0.000 1.201 0.000 ensemble_fitter.py:36(get_chi2)
241313/120601 0.525 0.000 100.705 0.001 {method 'get' of 'pymc.LazyFunction.LazyFunction' objects}
20000 0.395 0.000 0.668 0.000 {method 'normal' of 'mtrand.RandomState' objects}
20356 0.359 0.000 4.870 0.000 _methods.py:42(_mean)
121069/60357 0.329 0.000 90.102 0.001 {method 'run' of 'pymc.Container_values.DCValue' objects}
20000 0.317 0.000 101.158 0.005 StepMethods.py:434(step)
20356 0.291 0.000 0.325 0.000 necompiler.py:462(getContext)
2,2
2563570 function calls (2280546 primitive calls) in 91.036 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
80756 37.402 0.000 37.402 0.000 {method 'dot' of 'numpy.ndarray' objects}
20378 17.369 0.001 32.824 0.002 lvbp.py:47(get_populations_from_q)
100758 11.803 0.000 11.803 0.000 {method 'reduce' of 'numpy.ufunc' objects}
20378 10.529 0.001 11.157 0.001 necompiler.py:667(evaluate)
20378 6.635 0.000 30.884 0.002 lvbp.py:23(get_q)
121357/80601 0.667 0.000 79.525 0.001 PyMCObjects.py:434(get_value)
20000 0.558 0.000 1.269 0.000 ensemble_fitter.py:36(get_chi2)
241357/120601 0.557 0.000 88.637 0.001 {method 'get' of 'pymc.LazyFunction.LazyFunction' objects}
20000 0.422 0.000 0.716 0.000 {method 'normal' of 'mtrand.RandomState' objects}
20378 0.363 0.000 4.970 0.000 _methods.py:42(_mean)
121135/60379 0.334 0.000 79.510 0.001 {method 'run' of 'pymc.Container_values.DCValue' objects}
20000 0.325 0.000 89.299 0.004 StepMethods.py:434(step)
20378 0.310 0.000 0.349 0.000 necompiler.py:462(getContext)
20000 0.243 0.000 7.105 0.000 lvbp.py:211(logp_prior)
20378 0.237 0.000 0.255 0.000 _methods.py:32(_count_reduce_items)
3,3
2560555 function calls (2277875 primitive calls) in 91.947 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
80670 38.771 0.000 38.771 0.000 {method 'dot' of 'numpy.ndarray' objects}
20335 17.648 0.001 32.077 0.002 lvbp.py:47(get_populations_from_q)
100672 12.042 0.000 12.042 0.000 {method 'reduce' of 'numpy.ufunc' objects}
20335 9.396 0.000 10.026 0.000 necompiler.py:667(evaluate)
20335 6.848 0.000 31.664 0.002 lvbp.py:23(get_q)
121271/80601 0.654 0.000 79.951 0.001 PyMCObjects.py:434(get_value)
20000 0.570 0.000 1.246 0.000 ensemble_fitter.py:36(get_chi2)
241271/120601 0.554 0.000 89.549 0.001 {method 'get' of 'pymc.LazyFunction.LazyFunction' objects}
20000 0.431 0.000 0.739 0.000 {method 'normal' of 'mtrand.RandomState' objects}
20335 0.378 0.000 5.049 0.000 _methods.py:42(_mean)
20000 0.339 0.000 90.368 0.005 StepMethods.py:434(step)
20335 0.315 0.000 0.352 0.000 necompiler.py:462(getContext)
121006/60336 0.304 0.000 79.885 0.001 {method 'run' of 'pymc.Container_values.DCValue' objects}
20335 0.242 0.000 0.261 0.000 _methods.py:32(_count_reduce_items)
20000 0.234 0.000 0.676 0.000 linalg.py:1868(norm)
121014 0.225 0.000 0.225 0.000 {numpy.core.multiarray.array}
20000 0.216 0.000 1.185 0.000 StepMethods.py:516(propose)
1 0.193 0.193 91.946 91.946 MCMC.py:252(_loop)
80000 0.182 0.000 88.268 0.001 PyMCObjects.py:293(get_logp)
20000 0.178 0.000 7.728 0.000 lvbp.py:211(logp_prior)
40000 0.163 0.000 88.654 0.002 Node.py:23(logp_of_set)
20000 0.159 0.000 0.217 0.000 PyMCObjects.py:768(set_value)
4,4
2563612 function calls (2280716 primitive calls) in 94.110 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
80724 39.526 0.000 39.526 0.000 {method 'dot' of 'numpy.ndarray' objects}
20362 17.875 0.001 32.333 0.002 lvbp.py:47(get_populations_from_q)
100726 12.323 0.000 12.323 0.000 {method 'reduce' of 'numpy.ufunc' objects}
20362 9.443 0.000 10.092 0.000 necompiler.py:667(evaluate)
20362 7.371 0.000 32.412 0.002 lvbp.py:23(get_q)
121325/80601 0.702 0.000 82.448 0.001 PyMCObjects.py:434(get_value)
241325/120601 0.585 0.000 91.603 0.001 {method 'get' of 'pymc.LazyFunction.LazyFunction' objects}
20000 0.570 0.000 1.288 0.000 ensemble_fitter.py:36(get_chi2)
20000 0.429 0.000 0.741 0.000 {method 'normal' of 'mtrand.RandomState' objects}
20362 0.382 0.000 5.138 0.000 _methods.py:42(_mean)
121087/60363 0.341 0.000 82.410 0.001 {method 'run' of 'pymc.Container_values.DCValue' objects}
20000 0.337 0.000 92.352 0.005 StepMethods.py:434(step)
20362 0.318 0.000 0.357 0.000 necompiler.py:462(getContext)
20362 0.246 0.000 0.264 0.000 _methods.py:32(_count_reduce_items)
20000 0.242 0.000 7.107 0.000 lvbp.py:211(logp_prior)
20000 0.241 0.000 0.717 0.000 linalg.py:1868(norm)
121095 0.234 0.000 0.234 0.000 {numpy.core.multiarray.array}
"""
| gpl-3.0 | -1,653,505,764,016,225,800 | 52.797546 | 117 | 0.635306 | false |
nadenislamarre/recalbox-configgen | configgen/generators/dosbox/dosboxGenerator.py | 1 | 1197 | #!/usr/bin/env python
import Command
import recalboxFiles
from generators.Generator import Generator
import os.path
import glob
class DosBoxGenerator(Generator):
def getResolution(self, config):
return 'default'
# Main entry of the module
# Return command
def generate(self, system, rom, playersControllers, gameResolution):
# Find rom path
gameDir = rom
batFile = gameDir + "/dosbox.bat"
gameConfFile = gameDir + "/dosbox.cfg"
commandArray = [recalboxFiles.recalboxBins[system.config['emulator']],
"-userconf",
"-exit",
"""{}""".format(batFile),
"-c", """set ROOT={}""".format(gameDir)]
if os.path.isfile(gameConfFile):
commandArray.append("-conf")
commandArray.append("""{}""".format(gameConfFile))
else:
commandArray.append("-conf")
commandArray.append("""{}""".format(recalboxFiles.dosboxConfig))
if 'args' in system.config and system.config['args'] is not None:
commandArray.extend(system.config['args'])
return Command.Command(array=commandArray, env={"SDL_VIDEO_GL_DRIVER":"/usr/lib/libGLESv2.so"})
| mit | 604,286,931,525,195,000 | 33.2 | 103 | 0.629073 | false |
jeroanan/GameCollection | Tests/Interactors/TestSearchInteractor.py | 1 | 1059 | from Interactors.Interactor import Interactor
from Interactors.Search.Params.SearchInteractorParams import SearchInteractorParams
from Interactors.Search.SearchInteractor import SearchInteractor
from Tests.Interactors.InteractorTestBase import InteractorTestBase
class TestSearchInteractor(InteractorTestBase):
def setUp(self):
super().setUp()
self.__target = SearchInteractor()
self.__target.persistence = self.persistence
def test_is_interactor(self):
self.assertIsInstance(self.__target, Interactor)
def test_execute_calls_persistence_method(self):
self.__execute()
self.persistence.search.assert_called_with(search_term="search", sort_field="title", sort_dir="asc", user_id="userid")
def __execute(self, search_term="search", sort_field="title", sort_dir="asc", user_id="userid"):
p = SearchInteractorParams()
p.search_term = search_term
p.sort_field = sort_field
p.sort_direction = sort_dir
p.user_id = user_id
self.__target.execute(p)
| gpl-3.0 | -8,598,317,548,667,415,000 | 38.222222 | 126 | 0.708215 | false |
abeconnelly/untap | scripts/collect_samples_from_profile_data.py | 1 | 2707 | #!/usr/bin/python
import subprocess as sp
import sys
import json
import re
import csv
import os
debug=False
header=False
URL="https://my.pgp-hms.org"
if len(sys.argv)<2:
print "provide HTML file to parse"
sys.exit(1)
fn = sys.argv[1]
if debug: print "# processing:", fn
if len(sys.argv)<3:
print "provide huID"
sys.exit(1)
huid = sys.argv[2]
with open(fn) as ifp:
#ifp = open(fn)
pup_json = json.loads(sp.check_output(['pup', 'h3:contains("Samples") + div table tbody json{}'], stdin=ifp))
ready = False
CollectionEvent = []
curEvent = {}
curEvent["human_id"] = huid
curEvent["log"] = []
#.[0].children[].children[0].children
if len(pup_json) == 0:
sys.exit(0)
data = pup_json[0]["children"]
for x in data:
z = x["children"][0]
tag = z["tag"]
if tag == "th":
h = z["children"][0]
txt = h["text"]
href = h["href"]
if ready:
CollectionEvent.append(curEvent)
curEvent = {}
curEvent["human_id"] = huid
curEvent["log"] = []
ready = True
curEvent["href"] = URL + href
curEvent["text"] = txt
if debug: print "+++", href, txt
else:
description = re.sub(r'\s+', ' ', z["text"]).strip()
curEvent["description"] = description
ens = z["children"][1]["children"][0]["children"][0]["children"]
if debug: print ">>>", description
for en in ens:
en_date = ""
en_value = ""
en_descr = ""
if "children" not in en:
continue
row = en["children"]
if (len(row)>0) and ("text" in row[0]):
en_date = row[0]["text"]
if (len(row)>1) and ("text" in row[1]):
en_value = row[1]["text"]
if (len(row)>2) and ("text" in row[2]):
en_descr = row[2]["text"]
#en_date = en["children"][0]["text"]
#en_value = en["children"][1]["text"]
#en_descr = en["children"][2]["text"]
curEvent["log"].append( { "date" : en_date, "value" : en_value, "description" : en_descr })
if debug: print ">>>", en_date, ":", en_value, ":", en_descr
continue
CollectionEvent.append(curEvent)
if debug: print json.dumps(CollectionEvent)
writer = csv.writer(sys.stdout, delimiter='\t', lineterminator="\n")
if header:
writer.writerow([ "human_id", "href", "text", "description", "log_date", "log_text", "log_description" ])
for ev in CollectionEvent:
for log in ev["log"]:
writer.writerow([ ev["human_id"], ev["href"], ev["text"], ev["description"], log["date"], log["value"], log["description"] ])
| agpl-3.0 | 6,023,896,672,139,526,000 | 23.169643 | 133 | 0.531954 | false |
edeposit/marcxml2mods | setup.py | 1 | 1723 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from setuptools import setup, find_packages
from docs import getVersion
# Variables ===================================================================
CHANGELOG = open('CHANGES.rst').read()
LONG_DESCRIPTION = "\n\n".join([
open('README.rst').read(),
open('CONTRIBUTORS.rst').read(),
CHANGELOG
])
# Functions ===================================================================
setup(
name='marcxml2mods',
version=getVersion(CHANGELOG),
description="Conversion from MARCXML/OAI to MODS, which is used in NK CZ.",
long_description=LONG_DESCRIPTION,
url='https://github.com/edeposit/marcxml2mods',
author='Edeposit team',
author_email='[email protected]',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Topic :: Text Processing :: Markup :: XML",
"Topic :: Software Development :: Libraries :: Python Modules",
],
license='MIT',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
# scripts=[''],
zip_safe=False,
install_requires=[
"lxml",
"xmltodict",
"pydhtmlparser>=2.1.4",
"marcxml_parser",
"remove_hairs",
],
extras_require={
"test": [
"pytest"
],
"docs": [
"sphinx",
"sphinxcontrib-napoleon",
]
},
)
| mit | -1,199,779,438,846,476,800 | 23.971014 | 79 | 0.506674 | false |
buffer/thug | thug/Classifier/URLClassifier.py | 1 | 2222 | #!/usr/bin/env python
#
# URLClassifier.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Original code written by Thorsten Sick <[email protected]>
# from Avira (developed for the iTES Project http://ites-project.org)
#
# Modified by Angelo Dell'Aera:
# - Designed the more generic Classifier module and embedded this
# module into such module
# - Converted to YARA rules
import logging
from .BaseClassifier import BaseClassifier
log = logging.getLogger("Thug")
class URLClassifier(BaseClassifier):
default_rule_file = "rules/urlclassifier.yar"
default_filter_file = "rules/urlfilter.yar"
_classifier = "URL Classifier"
def __init__(self):
BaseClassifier.__init__(self)
def classify(self, url):
for match in self.rules.match(data = url):
self.matches.append((url, match))
if self.discard_url_match(url, match):
continue
self.handle_match_etags(match)
rule = match.rule
meta = match.meta
tags = ",".join([" ".join(t.split('_')) for t in match.tags])
log.ThugLogging.log_classifier("url", url, rule, tags, meta)
for c in self.custom_classifiers:
self.custom_classifiers[c](url)
def filter(self, url):
ret = False
for match in self.filters.match(data = url):
rule = match.rule
meta = match.meta
tags = ",".join([" ".join(t.split('_')) for t in match.tags])
log.ThugLogging.log_classifier("urlfilter", url, rule, tags, meta)
ret = True
return ret
| gpl-2.0 | -949,605,761,486,316,300 | 31.676471 | 78 | 0.653015 | false |
ntim/g4sipm | sample/run/luigi/dynamic_range_simulation_bretz.py | 1 | 2691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import luigi
import sqlite3
import glob
import numpy as np
import json
from simulation_meta_task import *
class DynamicRangeSimulation(SimulationDynamicMetaTask, luigi.Task):
name = luigi.Parameter("dynamic-range-simulation-bretz")
n_repititions = luigi.IntParameter(10000)
step = luigi.IntParameter(5000)
n_min = luigi.IntParameter(1) # minimum number of photons
n_max = luigi.IntParameter(1000000) # maximum number of photons
base_run_kwargs = luigi.Parameter("{}")
def run_kwargs(self):
kwargs = dict(exe="../fast/fast", persist_hits=False, noise_if_no_signal=True)
kwargs.update(json.loads(self.base_run_kwargs))
# print json.loads(self.base_run_kwargs)
# Dice number of particles
n = np.random.random_integers(self.n_min, self.n_max, self.step)
return [clone(kwargs, n_particles=ni) for ni in n]
def run_after_yield(self):
# Open results.
inputs = self.sqlite_from_runs()
with self.output().open("w") as o:
for input in inputs:
con = sqlite3.connect(input.fn)
cur = con.cursor()
try:
n_particles, t_min, t_max = cur.execute("SELECT nParticles, tMin, tMax FROM particleSourceMessenger;").fetchone()
n_eff_cells = np.sum(cur.execute("SELECT weight FROM `g4sipmDigis-0` WHERE time >= %s AND time < %s;" % (t_min, t_max)).fetchall())
print >> o, n_particles, n_eff_cells
except Exception as e:
print "Failure in", input.fn
print e
class All(luigi.WrapperTask):
def requires(self):
model = "../sample/resources/hamamatsu-s13360-1325pe.properties"
kwargs = [dict(temperature=10, bias_voltage=(52.19 + 5.00), path_spec="10-deg"),
dict(temperature=25, bias_voltage=(53.00 + 5.00), path_spec="25-deg"),
dict(temperature=40, bias_voltage=(53.81 + 5.00), path_spec="40-deg"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 - 0.054), path_spec="25-deg-0.054-V"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 + 0.054), path_spec="25-deg+0.054-V"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 - 0.005), path_spec="25-deg-0.005-V"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 + 0.005), path_spec="25-deg+0.005-V")
]
return [DynamicRangeSimulation(model=model, path_spec=kw["path_spec"], base_run_kwargs=json.dumps(kw)) for kw in kwargs]
if __name__ == "__main__":
luigi.run(main_task_cls=All)
| gpl-3.0 | -3,709,466,599,450,708,000 | 46.210526 | 151 | 0.599034 | false |
jcabdala/fades | tests/test_main.py | 1 | 9135 | # Copyright 2015-2019 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades
"""Tests for some code in main."""
import os
import unittest
from unittest.mock import patch
from pkg_resources import Requirement
from fades import VERSION, FadesError, __version__, main, parsing
from tests import create_tempfile
class VirtualenvCheckingTestCase(unittest.TestCase):
"""Tests for the virtualenv checker."""
def test_have_realprefix(self):
resp = main.detect_inside_virtualenv('prefix', 'real_prefix', 'base_prefix')
self.assertTrue(resp)
def test_no_baseprefix(self):
resp = main.detect_inside_virtualenv('prefix', None, None)
self.assertFalse(resp)
def test_prefix_is_baseprefix(self):
resp = main.detect_inside_virtualenv('prefix', None, 'prefix')
self.assertFalse(resp)
def test_prefix_is_not_baseprefix(self):
resp = main.detect_inside_virtualenv('prefix', None, 'other prefix')
self.assertTrue(resp)
class DepsGatheringTestCase(unittest.TestCase):
"""Tests for the gathering stage of consolidate_dependencies."""
def test_needs_ipython(self):
d = main.consolidate_dependencies(needs_ipython=True, child_program=None,
requirement_files=None, manual_dependencies=None)
self.assertDictEqual(d, {'pypi': {Requirement.parse('ipython')}})
def test_child_program(self):
child_program = 'tests/test_files/req_module.py'
d = main.consolidate_dependencies(needs_ipython=False, child_program=child_program,
requirement_files=None, manual_dependencies=None)
self.assertDictEqual(d, {'pypi': {Requirement.parse('foo'), Requirement.parse('bar')}})
def test_requirement_files(self):
requirement_files = [create_tempfile(self, ['dep'])]
d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
requirement_files=requirement_files,
manual_dependencies=None)
self.assertDictEqual(d, {'pypi': {Requirement.parse('dep')}})
def test_manual_dependencies(self):
manual_dependencies = ['dep']
d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
requirement_files=None,
manual_dependencies=manual_dependencies)
self.assertDictEqual(d, {'pypi': {Requirement.parse('dep')}})
class DepsMergingTestCase(unittest.TestCase):
"""Tests for the merging stage of consolidate_dependencies."""
def test_two_different(self):
requirement_files = [create_tempfile(self, ['1', '2'])]
manual_dependencies = ['vcs::3', 'vcs::4']
d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
requirement_files=requirement_files,
manual_dependencies=manual_dependencies)
self.assertEqual(d, {
'pypi': {Requirement.parse('1'), Requirement.parse('2')},
'vcs': {parsing.VCSDependency('3'), parsing.VCSDependency('4')}
})
def test_two_same_repo(self):
requirement_files = [create_tempfile(self, ['1', '2'])]
manual_dependencies = ['3', '4']
d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
requirement_files=requirement_files,
manual_dependencies=manual_dependencies)
self.assertDictEqual(d, {
'pypi': {Requirement.parse('1'), Requirement.parse('2'), Requirement.parse('3'),
Requirement.parse('4')}
})
def test_complex_case(self):
child_program = create_tempfile(self, ['"""fades:', '1', '2', '"""'])
requirement_files = [create_tempfile(self, ['3', 'vcs::5'])]
manual_dependencies = ['vcs::4', 'vcs::6']
d = main.consolidate_dependencies(needs_ipython=False, child_program=child_program,
requirement_files=requirement_files,
manual_dependencies=manual_dependencies)
self.assertEqual(d, {
'pypi': {Requirement.parse('1'), Requirement.parse('2'), Requirement.parse('3')},
'vcs': {parsing.VCSDependency('5'), parsing.VCSDependency('4'),
parsing.VCSDependency('6')}
})
def test_one_duplicated(self):
requirement_files = [create_tempfile(self, ['2', '2'])]
manual_dependencies = None
d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
requirement_files=requirement_files,
manual_dependencies=manual_dependencies)
self.assertDictEqual(d, {
'pypi': {Requirement.parse('2')}
})
def test_two_different_with_dups(self):
requirement_files = [create_tempfile(self, ['1', '2', '2', '2'])]
manual_dependencies = ['vcs::3', 'vcs::4', 'vcs::1', 'vcs::2']
d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
requirement_files=requirement_files,
manual_dependencies=manual_dependencies)
self.assertEqual(d, {
'pypi': {Requirement.parse('1'), Requirement.parse('2')},
'vcs': {parsing.VCSDependency('1'), parsing.VCSDependency('2'),
parsing.VCSDependency('3'), parsing.VCSDependency('4')}
})
class MiscTestCase(unittest.TestCase):
"""Miscellaneous tests."""
def test_version_show(self):
self.assertEqual(
__version__,
'.'.join([str(v) for v in VERSION]),
)
class ChildProgramDeciderTestCase(unittest.TestCase):
"""Check how the child program is decided."""
def test_indicated_with_executable_flag(self):
analyzable, child = main.decide_child_program(True, "foobar.py")
self.assertIsNone(analyzable)
self.assertEqual(child, "foobar.py")
def test_no_child_at_all(self):
analyzable, child = main.decide_child_program(False, None)
self.assertIsNone(analyzable)
self.assertIsNone(child)
def test_normal_child_program(self):
child_path = create_tempfile(self, "")
analyzable, child = main.decide_child_program(False, child_path)
self.assertEqual(analyzable, child_path)
self.assertEqual(child, child_path)
def test_normal_child_program_not_found(self):
with self.assertRaises(FadesError):
main.decide_child_program(False, 'does_not_exist.py')
def test_normal_child_program_no_access(self):
child_path = create_tempfile(self, "")
        os.chmod(child_path, 0o333)  # Remove read permission.
        self.addCleanup(os.chmod, child_path, 0o644)
        with self.assertRaises(FadesError):
            main.decide_child_program(False, child_path)
def test_remote_child_program_simple(self):
with patch('fades.helpers.download_remote_script') as mock:
mock.return_value = "new_path_script"
analyzable, child = main.decide_child_program(False, "http://scripts.com/foobar.py")
mock.assert_called_with("http://scripts.com/foobar.py")
# check that analyzable and child are the same, and that its content is the remote one
self.assertEqual(analyzable, "new_path_script")
self.assertEqual(child, "new_path_script")
def test_remote_child_program_ssl(self):
with patch('fades.helpers.download_remote_script') as mock:
mock.return_value = "new_path_script"
analyzable, child = main.decide_child_program(False, "https://scripts.com/foobar.py")
mock.assert_called_with("https://scripts.com/foobar.py")
# check that analyzable and child are the same, and that its content is the remote one
self.assertEqual(analyzable, "new_path_script")
self.assertEqual(child, "new_path_script")
def test_indicated_with_executable_flag_in_path(self):
"""Absolute paths not allowed when using --exec."""
with self.assertRaises(FadesError):
main.decide_child_program(True, os.path.join("path", "foobar.py"))
| gpl-3.0 | 6,745,923,289,832,959,000 | 40.707763 | 97 | 0.617802 | false |
kivy/plyer | plyer/facades/spatialorientation.py | 1 | 1652 | # coding=utf-8
class SpatialOrientation:
'''Spatial Orientation facade.
Computes the device's orientation based on the rotation matrix.
.. versionadded:: 1.3.1
'''
@property
def orientation(self):
'''Property that returns values of the current device orientation
as a (azimuth, pitch, roll) tuple.
Azimuth, angle of rotation about the -z axis. This value represents the
angle between the device's y axis and the magnetic north pole.
The range of values is -π to π.
Pitch, angle of rotation about the x axis. This value represents the
angle between a plane parallel to the device's screen and a plane
parallel to the ground.
The range of values is -π to π.
Roll, angle of rotation about the y axis. This value represents the
angle between a plane perpendicular to the device's screen and a plane
perpendicular to the ground.
The range of values is -π/2 to π/2.
Returns (None, None, None) if no data is currently available.
Supported Platforms:: Android
'''
return self._get_orientation() or (None, None, None)
def _get_orientation(self):
raise NotImplementedError()
def enable_listener(self):
'''Enable the orientation sensor.
'''
self._enable_listener()
def _enable_listener(self, **kwargs):
raise NotImplementedError()
def disable_listener(self):
'''Disable the orientation sensor.
'''
self._disable_listener()
def _disable_listener(self, **kwargs):
raise NotImplementedError()
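# A minimal usage sketch (not part of the original module; it assumes a
# platform implementation, e.g. the Android backend, is available):
#
#     from plyer import spatialorientation
#     spatialorientation.enable_listener()
#     azimuth, pitch, roll = spatialorientation.orientation
#     spatialorientation.disable_listener()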
| mit | 2,274,473,714,694,799,000 | 29.481481 | 79 | 0.64277 | false |
RedFantom/ttkwidgets | ttkwidgets/linklabel.py | 1 | 4045 | """
Author: RedFantom
License: GNU GPLv3
Source: This repository
"""
# Based on an idea by Nelson Brochado (https://www.github.com/nbrol/tkinter-kit)
import tkinter as tk
from tkinter import ttk
import webbrowser
class LinkLabel(ttk.Label):
"""
A :class:`ttk.Label` that can be clicked to open a link with a default blue color, a purple color when clicked and a bright
blue color when hovering over the Label.
"""
def __init__(self, master=None, **kwargs):
"""
Create a LinkLabel.
:param master: master widget
:param link: link to be opened
:type link: str
:param normal_color: text color when widget is created
:type normal_color: str
:param hover_color: text color when hovering over the widget
:type hover_color: str
:param clicked_color: text color when link is clicked
:type clicked_color: str
:param kwargs: options to be passed on to the :class:`ttk.Label` initializer
"""
self._cursor = kwargs.pop("cursor", "hand1")
self._link = kwargs.pop("link", "")
self._normal_color = kwargs.pop("normal_color", "#0563c1")
self._hover_color = kwargs.pop("hover_color", "#057bc1")
self._clicked_color = kwargs.pop("clicked_color", "#954f72")
ttk.Label.__init__(self, master, **kwargs)
self.config(foreground=self._normal_color)
self.__clicked = False
self.bind("<Button-1>", self.open_link)
self.bind("<Enter>", self._on_enter)
self.bind("<Leave>", self._on_leave)
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def _on_enter(self, *args):
"""Set the text color to the hover color."""
self.config(foreground=self._hover_color, cursor=self._cursor)
def _on_leave(self, *args):
"""Set the text color to either the normal color when not clicked or the clicked color when clicked."""
if self.__clicked:
self.config(foreground=self._clicked_color)
else:
self.config(foreground=self._normal_color)
self.config(cursor="")
def reset(self):
"""Reset Label to unclicked status if previously clicked."""
self.__clicked = False
self._on_leave()
def open_link(self, *args):
"""Open the link in the web browser."""
if "disabled" not in self.state():
webbrowser.open(self._link)
self.__clicked = True
self._on_leave()
def cget(self, key):
"""
Query widget option.
:param key: option name
:type key: str
:return: value of the option
To get the list of options for this widget, call the method :meth:`~LinkLabel.keys`.
"""
if key is "link":
return self._link
elif key is "hover_color":
return self._hover_color
elif key is "normal_color":
return self._normal_color
elif key is "clicked_color":
return self._clicked_color
else:
return ttk.Label.cget(self, key)
def configure(self, **kwargs):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~LinkLabel.keys`.
See :meth:`~LinkLabel.__init__` for a description of the widget specific option.
"""
self._link = kwargs.pop("link", self._link)
self._hover_color = kwargs.pop("hover_color", self._hover_color)
self._normal_color = kwargs.pop("normal_color", self._normal_color)
self._clicked_color = kwargs.pop("clicked_color", self._clicked_color)
ttk.Label.configure(self, **kwargs)
self._on_leave()
def keys(self):
"""Return a list of all resource names of this widget."""
keys = ttk.Label.keys(self)
keys.extend(["link", "normal_color", "hover_color", "clicked_color"])
return keys
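
# A minimal usage sketch (not part of the original module; the window title,
# label text and link below are illustrative values only):
if __name__ == '__main__':
    window = tk.Tk()
    window.title("LinkLabel demo")
    label = LinkLabel(window, text="ttkwidgets on GitHub",
                      link="https://github.com/RedFantom/ttkwidgets")
    label.pack(padx=10, pady=10)
    window.mainloop()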
| gpl-3.0 | -3,760,860,831,437,255,700 | 34.482456 | 127 | 0.596292 | false |
DStauffman/dstauffman2 | dstauffman2/archery/tournaments/scripts/script_testing.py | 1 | 5404 | """
Main test script for running the code.
Created on Mon Dec 15 13:21:07 2014
@author: DStauffman
"""
#%% Imports
import os
from dstauffman import setup_dir
import dstauffman2.archery.tournaments as arch
#%% Variables
# simulate stuff?
SIMULATE = True
# folder and file locations
folder = arch.get_root_dir()
output_folder = os.path.realpath(os.path.join(folder, '..', 'output'))
file = r'Test_Case_4.xlsx'
test_file = os.path.join(folder, 'tests', file)
if not os.path.isfile(test_file):
raise ValueError('The specfied test file "{}" was not found.'.format(test_file))
output_file = os.path.join(output_folder, file)
output_file_indiv = output_file.replace('.xlsx', '_indiv.csv')
output_file_teams = output_file.replace('.xlsx', '_teams.csv')
output_file_mixed = output_file.replace('.xlsx', '_mixed.csv')
# HTML output files
file_registered = os.path.join(output_folder, 'Registered Archers.htm')
file_reg_bales = os.path.join(output_folder, 'Individual Bale Assignments.htm')
file_indiv_res = os.path.join(output_folder, 'Individual Results.htm')
# bracket lists
# ex: Individual Brackets Female Bare Bow.htm
file_list_indiv = {}
file_list_teams = {}
file_list_mixed = {}
for div in arch.DIVISIONS:
for sex in arch.GENDERS:
file_list_indiv[sex + ' ' + div] = os.path.join(output_folder, 'Individual Brackets ' + sex + ' ' + div + '.htm')
file_list_teams[sex + ' ' + div] = os.path.join(output_folder, 'Team Brackets ' + sex + ' ' + div + '.htm')
if sex == arch.GENDERS[0]:
file_list_mixed[div] = os.path.join(output_folder, 'Mixed Team Brackets ' + div + '.htm')
#%% Output folder
# create the output folder if it doesn't already exist
if not os.path.isdir(output_folder):
setup_dir(output_folder)
#%% Process data
# import data from excel
data_indiv = arch.import_from_excel(test_file, sheet=arch.SHEET_NAME_INDIV)
data_teams = arch.import_from_excel(test_file, sheet=arch.SHEET_NAME_TEAMS)
data_mixed = arch.import_from_excel(test_file, sheet=arch.SHEET_NAME_MIXED)
# display some information
arch.display_info(data_indiv)
# write out list of registered archers
arch.write_registered_archers(data_indiv, filename=file_registered, show_bales=False)
# assign to bales
data_indiv = arch.assign_bales(data_indiv)
# validate bales
arch.validate_bales(data_indiv)
# write out list of registered archers with bale assignments now included
arch.write_registered_archers(data_indiv, filename=file_reg_bales, show_bales=True)
# enter scores (simulated)
if SIMULATE:
arch.simulate_individual_scores(data_indiv)
else:
# read by in from updated excel file
pass
# determine individual seeds
data_indiv = arch.update_indiv(data_indiv)
# display final individual rankings
arch.write_indiv_results(data_indiv, filename=file_indiv_res)
# update information for teams based on individual results
data_teams = arch.update_teams(data_indiv, data_teams)
data_mixed = arch.update_mixed(data_indiv, data_mixed)
#%% Brackets
# assign bracket bales based on seeds/number of archers in each division and field layout
# TODO: write this
## validate all brackets & bales
#arch.validate_brackets(data_indiv, team='indiv')
#arch.validate_brackets(data_teams, team='teams')
#arch.validate_brackets(data_mixed, team='mixed')
#
## write initial brackets for individual competition
#arch.write_brackets(data_indiv, filename=file_list_indiv, team='indiv')
#
## enter individual bracket scores (simulated for each round), start with 1/16 round
#if SIMULATE:
# arch.simulate_bracket_scores(data_indiv, round_='1/16')
#arch.write_brackets(data_indiv, filename=file_list_indiv, team='indiv')
## update brackets 1/8 round and rewrite brackets
#if SIMULATE:
# arch.simulate_bracket_scores(data_indiv, round_='1/8')
#arch.write_brackets(data_indiv, filename=file_list_indiv, team='indiv')
## update brackets 1/4 (quarter-final) round and rewrite brackets
#if SIMULATE:
# arch.simulate_bracket_scores(data_indiv, round_='1/4')
#arch.write_brackets(data_indiv, filename=file_list_indiv, team='indiv')
## update brackets 1/2 (semi-final) round and rewrite brackets
#if SIMULATE:
# arch.simulate_bracket_scores(data_indiv, round_='1/2')
#arch.write_brackets(data_indiv, filename=file_list_indiv, team='indiv')
## update brackets 1 (final) round and produce final results brackets
#if SIMULATE:
# arch.simulate_bracket_scores(data_indiv, round_='1/1')
#arch.write_brackets(data_indiv, filename=file_list_indiv, team='indiv')
#
## enter mixed team bracket scores (simulated for each round)
##TODO: repeat again
## write initial brackets for team competition
##arch.write_brackets(data_teams, filename=file_list_teams, team='teams')
#
## enter team bracket scores (simulated for each round)
##TODO: repeat again
## write initial brackets for mixed team competition
##arch.write_brackets(data_mixed, filename=file_list_mixed, team='mixed')
# write updated information to output (CSV)
arch.export_to_excel(data_indiv, output_file_indiv)
arch.export_to_excel(data_teams, output_file_teams)
arch.export_to_excel(data_mixed, output_file_mixed)
# write updated information to output (Excel) (Must write all sheets at once?)
arch.export_to_excel([data_indiv, data_teams, data_mixed], output_file, \
sheet=[arch.SHEET_NAME_INDIV, arch.SHEET_NAME_TEAMS, arch.SHEET_NAME_MIXED])
| lgpl-3.0 | 382,572,098,133,474,240 | 38.15942 | 121 | 0.725944 | false |
klahnakoski/esReplicate | pyLibrary/queries/expression_compiler.py | 1 | 1182 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from pyLibrary import convert
from mo_logs import Log
from mo_dots import coalesce, Data
from mo_times.dates import Date
true = True
false = False
null = None
EMPTY_DICT = {}
def compile_expression(source):
"""
THIS FUNCTION IS ON ITS OWN FOR MINIMAL GLOBAL NAMESPACE
:param source: PYTHON SOURCE CODE
:return: PYTHON FUNCTION
"""
# FORCE MODULES TO BE IN NAMESPACE
_ = coalesce
_ = Date
_ = convert
_ = Log
_ = Data
_ = EMPTY_DICT
_ = re
output = None
exec """
def output(row, rownum=None, rows=None):
try:
return """ + source + """
except Exception as e:
Log.error("Problem with dynamic function {{func|quote}}", func= """ + convert.value2quote(source) + """, cause=e)
"""
return output
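
# A minimal usage sketch (not part of the original module): the compiled
# function receives a row (plus optional rownum/rows) and evaluates the given
# source against it.
#
#     double_x = compile_expression("row * 2")
#     double_x(21)   # returns 42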
| mpl-2.0 | -7,833,379,894,178,711,000 | 21.730769 | 122 | 0.652284 | false |
ywangd/stash | bin/grep.py | 1 | 2282 | # -*- coding: utf-8 -*-
"""Search a regular expression pattern in one or more files"""
from __future__ import print_function
import argparse
import collections
import fileinput
import os
import re
import sys
def main(args):
global _stash
ap = argparse.ArgumentParser()
ap.add_argument('pattern', help='the pattern to match')
ap.add_argument('files', nargs='*', help='files to be searched')
ap.add_argument('-i', '--ignore-case', action='store_true', help='ignore case while searching')
ap.add_argument('-v', '--invert', action='store_true', help='invert the search result')
ap.add_argument('-c', '--count', action='store_true', help='count the search results instead of normal output')
ns = ap.parse_args(args)
flags = 0
if ns.ignore_case:
flags |= re.IGNORECASE
pattern = re.compile(ns.pattern, flags=flags)
# Do not try to grep directories
files = [f for f in ns.files if not os.path.isdir(f)]
fileinput.close() # in case it is not closed
try:
counts = collections.defaultdict(int)
for line in fileinput.input(files, openhook=fileinput.hook_encoded("utf-8")):
if bool(pattern.search(line)) != ns.invert:
if ns.count:
counts[fileinput.filename()] += 1
else:
if ns.invert: # optimize: if ns.invert, then no match, so no highlight color needed
newline = line
else:
newline = re.sub(pattern, lambda m: _stash.text_color(m.group(), 'red'), line)
if fileinput.isstdin():
fmt = u'{lineno}: {line}'
else:
fmt = u'{filename}: {lineno}: {line}'
print(fmt.format(filename=fileinput.filename(), lineno=fileinput.filelineno(), line=newline.rstrip()))
if ns.count:
for filename, count in counts.items():
fmt = u'{count:6} {filename}'
print(fmt.format(filename=filename, count=count))
except Exception as err:
print("grep: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
finally:
fileinput.close()
if __name__ == "__main__":
main(sys.argv[1:])
| mit | -3,807,069,574,870,944,300 | 34.65625 | 122 | 0.574934 | false |
wkoszek/puzzles | pattern_search/woogle_index.py | 1 | 1936 | #!/usr/bin/env python3
import getopt
import sys
import re
import random
import sqlite3
def main():
g_input_fn = False
g_do_search = False
g_dict_fn = False
g_words = []
try:
opts, args = getopt.getopt(sys.argv[1:],
"hi:d:s:v",
["help", "input=", "dict=", "search="])
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
g_input_fn = a
elif o in ("-s", "--search"):
g_do_search = a;
elif o in ("-d", "--dict"):
g_dict_fn = a
else:
assert False, "unhandled option"
if g_input_fn == False:
print("You must pass --input to indicate where DB is");
sys.exit(2);
if g_do_search == False and g_dict_fn == False:
print("You must either pass --input with --dict");
sys.exit(2)
random.seed(14)
conn = sqlite3.connect(g_input_fn);
c = conn.cursor()
if g_do_search == False:
assert(g_dict_fn != None);
print("# initializing database " + g_dict_fn);
with open(g_dict_fn, "r") as f:
g_words += [ [line, random.randint(0, 1000)]
for line in f.read().split("\n")
if not re.match("^$", line)]
f.close()
c.execute("DROP TABLE IF EXISTS words");
c.execute('''CREATE TABLE words(word text, score real)''')
for word in g_words:
if len(word) <= 0:
continue;
c.execute("""
INSERT INTO words VALUES('{0}','{1}');
""".format(word[0], word[1]));
conn.commit();
conn.close();
else:
# From http://stackoverflow.com/questions/5071601/how-do-i-use-regex-in-a-sqlite-query
def match(expr, item):
return re.match(expr, item) is not None
conn.create_function("MATCH", 2, match)
c.execute("""
SELECT * FROM words
WHERE MATCH('.*{0}.*', word)
ORDER BY score DESC LIMIT 10;
""".format(g_do_search));
for v, r in c.fetchall():
print(v, r)
if __name__ == "__main__":
main()
| bsd-2-clause | 6,192,522,204,303,101,000 | 21.776471 | 88 | 0.594008 | false |
tengqm/senlin-container | senlin/engine/service.py | 1 | 95655 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import functools
import uuid
from docker import Client
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from senlin.common import consts
from senlin.common import context as senlin_context
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common.i18n import _LE
from senlin.common.i18n import _LI
from senlin.common import messaging as rpc_messaging
from senlin.common import scaleutils as su
from senlin.common import schema
from senlin.common import utils
from senlin.db import api as db_api
from senlin.engine.actions import base as action_mod
from senlin.engine import cluster as cluster_mod
from senlin.engine import cluster_policy
from senlin.engine import dispatcher
from senlin.engine import environment
from senlin.engine import health_manager
from senlin.engine import node as node_mod
from senlin.engine import receiver as receiver_mod
from senlin.engine import scheduler
from senlin.policies import base as policy_base
from senlin.profiles import base as profile_base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def request_context(func):
@functools.wraps(func)
def wrapped(self, ctx, *args, **kwargs):
if ctx is not None and not isinstance(ctx,
senlin_context.RequestContext):
ctx = senlin_context.RequestContext.from_dict(ctx.to_dict())
try:
return func(self, ctx, *args, **kwargs)
except exception.SenlinException:
raise oslo_messaging.rpc.dispatcher.ExpectedException()
return wrapped
class EngineService(service.Service):
'''Lifecycle manager for a running service engine.
- All the contained methods here are called from the RPC client.
- If a RPC call does not have a corresponding method here, an exception
will be thrown.
- Arguments to these calls are added dynamically and will be treated as
keyword arguments by the RPC client.
'''
def __init__(self, host, topic, manager=None):
super(EngineService, self).__init__()
self.host = host
self.topic = topic
self.dispatcher_topic = consts.ENGINE_DISPATCHER_TOPIC
self.health_mgr_topic = consts.ENGINE_HEALTH_MGR_TOPIC
# The following are initialized here and will be assigned in start()
# which happens after the fork when spawning multiple worker processes
self.engine_id = None
self.TG = None
self.target = None
self._rpc_server = None
        # Initialize the global environment
environment.initialize()
def init_tgm(self):
self.TG = scheduler.ThreadGroupManager()
def start(self):
self.engine_id = str(uuid.uuid4())
self.init_tgm()
# create a dispatcher RPC service for this engine.
self.dispatcher = dispatcher.Dispatcher(self,
self.dispatcher_topic,
consts.RPC_API_VERSION,
self.TG)
LOG.info(_LI("Starting dispatcher for engine %s"), self.engine_id)
self.dispatcher.start()
# create a health manager RPC service for this engine.
self.health_mgr = health_manager.HealthManager(
self, self.health_mgr_topic, consts.RPC_API_VERSION)
LOG.info(_LI("Starting health manager for engine %s"), self.engine_id)
self.health_mgr.start()
target = oslo_messaging.Target(version=consts.RPC_API_VERSION,
server=self.host,
topic=self.topic)
self.target = target
self._rpc_server = rpc_messaging.get_rpc_server(target, self)
self._rpc_server.start()
self.service_manage_cleanup()
self.TG.add_timer(cfg.CONF.periodic_interval,
self.service_manage_report)
super(EngineService, self).start()
def _stop_rpc_server(self):
# Stop RPC connection to prevent new requests
LOG.info(_LI("Stopping engine service..."))
try:
self._rpc_server.stop()
self._rpc_server.wait()
LOG.info(_LI('Engine service stopped successfully'))
except Exception as ex:
LOG.error(_LE('Failed to stop engine service: %s'),
six.text_type(ex))
def stop(self):
self._stop_rpc_server()
# Notify dispatcher to stop all action threads it started.
LOG.info(_LI("Stopping dispatcher for engine %s"), self.engine_id)
self.dispatcher.stop()
# Notify health_manager to stop
LOG.info(_LI("Stopping health manager for engine %s"), self.engine_id)
self.health_mgr.stop()
self.TG.stop()
super(EngineService, self).stop()
def service_manage_report(self):
ctx = senlin_context.get_admin_context()
try:
svc = db_api.service_update(ctx, self.engine_id)
# if svc is None, it has not been created yet.
if svc is None:
params = dict(host=self.host,
binary='senlin-engine',
service_id=self.engine_id,
topic=self.topic)
db_api.service_create(ctx, **params)
except Exception as ex:
LOG.error(_LE('Service %(service_id)s update failed: %(error)s'),
{'service_id': self.engine_id, 'error': ex})
def service_manage_cleanup(self):
ctx = senlin_context.get_admin_context()
last_updated_window = (2 * cfg.CONF.periodic_interval)
time_line = timeutils.utcnow() - datetime.timedelta(
seconds=last_updated_window)
svcs = db_api.service_get_all(ctx)
for svc in svcs:
if svc['id'] == self.engine_id:
continue
if svc['updated_at'] < time_line:
# has not been updated recently; assume the service is dead.
LOG.info(_LI('Service %s was aborted'), svc['id'])
db_api.service_delete(ctx, svc['id'])
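# Cleanup window sketch (illustrative only; the real interval comes from
# cfg.CONF.periodic_interval): with a periodic_interval of 60 seconds, any
# service record other than this engine whose updated_at is older than
#   timeutils.utcnow() - datetime.timedelta(seconds=2 * 60)
# is treated as dead and removed above.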
@request_context
def credential_create(self, context, cred, attrs=None):
"""Create the credential based on the context.
We may add more parameters in future to the query parameter, for
example as Senlin expands its support to non-OpenStack backends.
:param context: The requesting context which contains the user id
along with other identity information.
:param cred: A credential to be associated with the user identity
provided in the context.
:param dict attrs: Optional attributes associated with the credential.
:return: A dictionary containing the persistent credential.
"""
values = {
'user': context.user,
'project': context.project,
'cred': {
'openstack': {
'trust': cred
}
}
}
db_api.cred_create_update(context, values)
return {'cred': cred}
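# Example (hypothetical values): after credential_create the persisted
# record built above looks roughly like
#   {'user': '<user-id>', 'project': '<project-id>',
#    'cred': {'openstack': {'trust': '<trust-id>'}}}
# and the RPC caller gets back {'cred': '<trust-id>'}.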
@request_context
def credential_get(self, context, query=None):
"""Get the credential based on the context.
We may add more parameters in future to the query parameter, for
example as Senlin expands its support to non-OpenStack backends.
:param context: The requesting context which contains the user id
along with other identity information.
:param dict query: Optional query parameters.
:return: A dictionary containing the persistent credential, or None
if no matching credential is found.
"""
res = db_api.cred_get(context, context.user, context.project)
if res is None:
return None
return res.cred.get('openstack', None)
@request_context
def credential_update(self, context, cred, **attrs):
"""Update a credential based on the context and provided value.
We may add more parameters in future to the query parameter, for
example as Senlin expands its support to non-OpenStack backends.
:param context: The requesting context which contains the user id
along with other identity information.
:param dict attrs: Optional attribute values to be associated with
the credential.
:return: A dictionary containing the updated credential.
"""
db_api.cred_update(context, context.user, context.project,
{'cred': {'openstack': {'trust': cred}}})
return {'cred': cred}
@request_context
def get_revision(self, context):
return cfg.CONF.revision['senlin_engine_revision']
@request_context
def profile_type_list(self, context):
"""List known profile type implementations.
:param context: An instance of the request context.
:return: A list of profile types.
"""
return environment.global_env().get_profile_types()
@request_context
def profile_type_get(self, context, type_name):
"""Get the details about a profile type.
:param context: An instance of the request context.
:param type_name: The name of a profile type.
:return: The details about a profile type.
"""
profile = environment.global_env().get_profile(type_name)
data = profile.get_schema()
return {
'name': type_name,
'schema': data,
}
def profile_find(self, context, identity, project_safe=True):
"""Find a profile with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:param project_safe: A boolean indicating whether profiles from
projects other than the requesting one can be
returned.
:return: A DB object of profile or an exception `ProfileNotFound` if
no matching object is found.
"""
if uuidutils.is_uuid_like(identity):
profile = db_api.profile_get(context, identity,
project_safe=project_safe)
if not profile:
profile = db_api.profile_get_by_name(context, identity,
project_safe=project_safe)
else:
profile = db_api.profile_get_by_name(context, identity,
project_safe=project_safe)
if not profile:
profile = db_api.profile_get_by_short_id(
context, identity, project_safe=project_safe)
if not profile:
raise exception.ProfileNotFound(profile=identity)
return profile
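# Usage sketch (hypothetical identities) for the lookup order implemented
# above: UUID first, then name, then short ID.
#   self.profile_find(ctx, '9f1c2a34-....-....')   # matched as a UUID
#   self.profile_find(ctx, 'web-profile')          # matched by name
#   self.profile_find(ctx, '9f1c2a34')             # matched as a short ID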
@request_context
def profile_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List profiles matching the specified criteria.
:param context: An instance of request context.
:param limit: An integer specifying the maximum number of profiles to
return in a response.
:param marker: An UUID specifying the profile after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether profiles from all
projects will be returned.
:return: A list of `Profile` object representations.
"""
limit = utils.parse_int_param(consts.PARAM_LIMIT, limit)
utils.validate_sort_param(sort, consts.PROFILE_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
profiles = profile_base.Profile.load_all(context,
limit=limit, marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
return [p.to_dict() for p in profiles]
@request_context
def profile_create(self, context, name, spec, metadata=None):
"""Create a profile with the given properties.
:param context: An instance of the request context.
:param name: The name for the profile to be created.
:param spec: A dictionary containing the spec for the profile.
:param metadata: A dictionary containing optional key-value pairs to
be associated with the profile.
:return: A dictionary containing the details of the profile object
created.
"""
if cfg.CONF.name_unique:
if db_api.profile_get_by_name(context, name):
msg = _("A profile named '%(name)s' already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
try:
plugin = environment.global_env().get_profile(type_str)
except exception.ProfileTypeNotFound:
msg = _("The specified profile type (%(name)s) is not found."
) % {"name": type_str}
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating profile %(type)s '%(name)s'."),
{'type': type_str, 'name': name})
kwargs = {
'user': context.user,
'project': context.project,
'domain': context.domain,
'metadata': metadata,
}
profile = plugin(name, spec, **kwargs)
try:
profile.validate()
except exception.InvalidSpec as ex:
msg = six.text_type(ex)
LOG.error(_LE("Failed in creating profile: %s"), msg)
raise exception.BadRequest(msg=msg)
profile.store(context)
LOG.info(_LI("Profile %(name)s is created: %(id)s."),
{'name': name, 'id': profile.id})
return profile.to_dict()
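# Example call (a hypothetical sketch; the exact spec contents depend on
# the profile plugin in use):
#   self.profile_create(ctx, 'web-profile',
#                       spec={'type': 'os.nova.server',
#                             'version': '1.0',
#                             'properties': {...}},
#                       metadata={'owner': 'team-a'})
# schema.get_spec_version() above yields ('os.nova.server', '1.0'), so the
# plugin is looked up under the key 'os.nova.server-1.0'.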
@request_context
def profile_get(self, context, identity):
"""Retrieve the details about a profile.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:return: A dictionary containing the policy details, or an exception
of type `ProfileNotFound` if no matching object is found.
"""
db_profile = self.profile_find(context, identity)
profile = profile_base.Profile.load(context, profile=db_profile)
return profile.to_dict()
@request_context
def profile_update(self, context, profile_id, name=None, metadata=None):
"""Update the properties of a given profile.
:param context: An instance of the request context.
:param profile_id: The UUID, name or short-id of a profile.
:param name: The new name for the profile.
:param metadata: A dictionary of key-value pairs to be associated with
the profile.
:returns: A dictionary containing the details of the updated profile,
or an exception `ProfileNotFound` if no matching profile is
found.
"""
LOG.info(_LI("Updating profile '%(id)s.'"), {'id': profile_id})
db_profile = self.profile_find(context, profile_id)
profile = profile_base.Profile.load(context, profile=db_profile)
changed = False
if name is not None and name != profile.name:
profile.name = name
changed = True
if metadata is not None and metadata != profile.metadata:
profile.metadata = metadata
changed = True
if changed:
profile.store(context)
LOG.info(_LI("Profile '%(id)s' is updated."), {'id': profile_id})
return profile.to_dict()
@request_context
def profile_delete(self, context, identity):
"""Delete the specified profile.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:return: None if succeeded or an exception of `ResourceInUse` if
profile is referenced by certain clusters/nodes.
"""
db_profile = self.profile_find(context, identity)
LOG.info(_LI("Deleting profile '%s'."), identity)
try:
profile_base.Profile.delete(context, db_profile.id)
except exception.ResourceBusyError:
LOG.error(_LI("The profile '%s' cannot be deleted."), identity)
raise exception.ResourceInUse(resource_type='profile',
resource_id=db_profile.id)
LOG.info(_LI("Profile '%(id)s' is deleted."), {'id': identity})
@request_context
def policy_type_list(self, context):
"""List known policy type implementations.
:param context: An instance of the request context.
:return: A list of policy types.
"""
return environment.global_env().get_policy_types()
@request_context
def policy_type_get(self, context, type_name):
"""Get the details about a policy type.
:param context: An instance of the request context.
:param type_name: The name of a policy type.
:return: The details about a policy type.
"""
policy_type = environment.global_env().get_policy(type_name)
data = policy_type.get_schema()
return {
'name': type_name,
'schema': data
}
def policy_find(self, context, identity, project_safe=True):
"""Find a policy with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:param project_safe: A boolean indicating whether policies from
projects other than the requesting one should be
evaluated.
:return: A DB object of policy or an exception of `PolicyNotFound` if
no matching object is found.
"""
if uuidutils.is_uuid_like(identity):
policy = db_api.policy_get(context, identity,
project_safe=project_safe)
if not policy:
policy = db_api.policy_get_by_name(context, identity,
project_safe=project_safe)
else:
policy = db_api.policy_get_by_name(context, identity,
project_safe=project_safe)
if not policy:
policy = db_api.policy_get_by_short_id(
context, identity, project_safe=project_safe)
if not policy:
raise exception.PolicyNotFound(policy=identity)
return policy
@request_context
def policy_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List policies matching the specified criteria.
:param context: An instance of request context.
:param limit: An integer specifying the maximum number of policies to
return in a response.
:param marker: An UUID specifying the policy after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether policies from all
projects will be returned.
:return: A list of `Policy` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.POLICY_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
policies = policy_base.Policy.load_all(context,
limit=limit, marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
return [p.to_dict() for p in policies]
@request_context
def policy_create(self, context, name, spec):
"""Create a policy with the given name and spec.
:param context: An instance of the request context.
:param name: The name for the policy to be created.
:param spec: A dictionary containing the spec for the policy.
:return: A dictionary containing the details of the policy object
created.
"""
if cfg.CONF.name_unique:
if db_api.policy_get_by_name(context, name):
msg = _("A policy named '%(name)s' already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
try:
plugin = environment.global_env().get_policy(type_str)
except exception.PolicyTypeNotFound:
msg = _("The specified policy type (%(name)s) is not found."
) % {"name": type_str}
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating policy %(type)s '%(name)s'"),
{'type': type_str, 'name': name})
kwargs = {
'user': context.user,
'project': context.project,
'domain': context.domain,
}
policy = plugin(name, spec, **kwargs)
try:
policy.validate()
except exception.InvalidSpec as ex:
msg = six.text_type(ex)
LOG.error(_LE("Failed in creating policy: %s"), msg)
raise exception.BadRequest(msg=msg)
policy.store(context)
LOG.info(_LI("Policy '%(name)s' is created: %(id)s."),
{'name': name, 'id': policy.id})
return policy.to_dict()
@request_context
def policy_get(self, context, identity):
"""Retrieve the details about a policy.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a policy.
:return: A dictionary containing the policy details, or an exception
of type `PolicyNotFound` if no matching object is found.
"""
db_policy = self.policy_find(context, identity)
policy = policy_base.Policy.load(context, db_policy=db_policy)
return policy.to_dict()
@request_context
def policy_update(self, context, identity, name):
"""Update the properties of a given policy.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a policy.
:param name: The new name for the policy.
:returns: A dictionary containing the details of the updated policy or
an exception `PolicyNotFound` if no matching policy is found,
or an exception `BadRequest` if name is not provided.
"""
if not name:
msg = _('Policy name not specified.')
raise exception.BadRequest(msg=msg)
db_policy = self.policy_find(context, identity)
policy = policy_base.Policy.load(context, db_policy=db_policy)
if name != policy.name:
LOG.info(_LI("Updating policy '%s'."), identity)
policy.name = name
policy.store(context)
LOG.info(_LI("Policy '%s' is updated."), identity)
return policy.to_dict()
@request_context
def policy_delete(self, context, identity):
"""Delete the specified policy.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a policy.
:return: None if succeeded or an exception of `ResourceInUse` if
policy is still attached to certain clusters.
"""
db_policy = self.policy_find(context, identity)
LOG.info(_LI("Delete policy '%s'."), identity)
try:
policy_base.Policy.delete(context, db_policy.id)
except exception.ResourceBusyError:
LOG.error(_LI("Policy '%s' cannot be deleted."), identity)
raise exception.ResourceInUse(resource_type='policy',
resource_id=db_policy.id)
LOG.info(_LI("Policy '%s' is deleted."), identity)
def cluster_find(self, context, identity, project_safe=True):
"""Find a cluster with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short ID of a cluster.
:param project_safe: A boolean parameter specifying whether only
clusters from the same project are qualified to
be returned.
:return: An instance of `Cluster` class.
:raises: `ClusterNotFound` if no matching object can be found.
"""
if uuidutils.is_uuid_like(identity):
cluster = db_api.cluster_get(context, identity,
project_safe=project_safe)
if not cluster:
cluster = db_api.cluster_get_by_name(context, identity,
project_safe=project_safe)
else:
cluster = db_api.cluster_get_by_name(context, identity,
project_safe=project_safe)
# maybe it is a short form of UUID
if not cluster:
cluster = db_api.cluster_get_by_short_id(
context, identity, project_safe=project_safe)
if not cluster:
raise exception.ClusterNotFound(cluster=identity)
return cluster
@request_context
def cluster_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List clusters matching the specified criteria.
:param context: An instance of request context.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the cluster after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether clusters from all
projects will be returned.
:return: A list of `Cluster` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.CLUSTER_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
clusters = cluster_mod.Cluster.load_all(context, limit=limit,
marker=marker, sort=sort,
filters=filters,
project_safe=project_safe)
return [cluster.to_dict() for cluster in clusters]
@request_context
def cluster_get(self, context, identity):
"""Retrieve the cluster specified.
:param context: An instance of the request context.
:param identity: The UUID, name or short-ID of a cluster.
:return: A dictionary containing the details about a cluster.
"""
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
return cluster.to_dict()
def check_cluster_quota(self, context):
"""Validate the number of clusters created in a project.
:param context: An instance of the request context.
:return: None if cluster creation is okay, or an exception of type
`Forbidden` if the number of clusters reaches the maximum.
"""
existing = db_api.cluster_count_all(context)
maximum = cfg.CONF.max_clusters_per_project
if existing >= maximum:
raise exception.Forbidden()
@request_context
def cluster_create(self, context, name, desired_capacity, profile_id,
min_size=None, max_size=None, metadata=None,
timeout=None, host_cluster=None):
"""Create a cluster.
:param context: An instance of the request context.
:param name: A string specifying the name of the cluster to be created.
:param desired_capacity: The desired capacity of the cluster.
:param profile_id: The UUID, name or short-ID of the profile to use.
:param min_size: An integer specifying the minimum size of the cluster.
:param max_size: An integer specifying the maximum size of the cluster.
:param metadata: A dictionary containing key-value pairs to be
associated with the cluster.
:param timeout: An optional integer specifying the operation timeout
value in seconds.
:param host_cluster: An optional identity (UUID, name or short-ID) of an
    existing cluster; its ID and its node list are recorded in the new
    cluster's metadata ('host_cluster' and 'candidate_nodes') for use as
    candidate container hosts.
:return: A dictionary containing the details about the cluster and the
ID of the action triggered by this operation.
"""
self.check_cluster_quota(context)
if cfg.CONF.name_unique:
if db_api.cluster_get_by_name(context, name):
msg = _("The cluster (%(name)s) already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
try:
db_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile '%s' is not found.") % profile_id
raise exception.BadRequest(msg=msg)
init_size = utils.parse_int_param(consts.CLUSTER_DESIRED_CAPACITY,
desired_capacity)
if min_size is not None:
min_size = utils.parse_int_param(consts.CLUSTER_MIN_SIZE, min_size)
if max_size is not None:
max_size = utils.parse_int_param(consts.CLUSTER_MAX_SIZE, max_size,
allow_negative=True)
if timeout is not None:
timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)
res = su.check_size_params(None, init_size, min_size, max_size, True)
if res:
raise exception.BadRequest(msg=res)
LOG.info(_LI("Creating cluster '%s'."), name)
if host_cluster:
host_cluster = self.cluster_get(context, host_cluster)
host_nodes = host_cluster['nodes']
metadata = metadata or {}
metadata.update(host_cluster=host_cluster['id'])
metadata.update(candidate_nodes=host_nodes)
kwargs = {
'min_size': min_size,
'max_size': max_size,
'timeout': timeout,
'metadata': metadata,
'user': context.user,
'project': context.project,
'domain': context.domain,
}
cluster = cluster_mod.Cluster(name, init_size, db_profile.id,
**kwargs)
cluster.store(context)
# Build an Action for cluster creation
kwargs = {
'name': 'cluster_create_%s' % cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, cluster.id,
consts.CLUSTER_CREATE, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster create action queued: %s."), action_id)
result = cluster.to_dict()
result['action'] = action_id
return result
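# Example call (hypothetical identities; 'vm-cluster' is assumed to be an
# existing cluster whose nodes become candidate container hosts recorded
# in the metadata above):
#   self.cluster_create(ctx, 'web', 3, 'container-profile',
#                       min_size=1, max_size=5, metadata={},
#                       host_cluster='vm-cluster')
# The result is the cluster dict plus result['action'], the ID of the
# queued CLUSTER_CREATE action.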
@request_context
def cluster_update(self, context, identity, name=None, profile_id=None,
metadata=None, timeout=None):
"""Update a cluster.
:param context: An instance of the request context.
:param identity: The UUID, name, or short-ID of the target cluster.
:param name: A string specifying the new name of the cluster.
:param profile_id: The UUID, name or short-ID of the new profile.
:param metadata: A dictionary containing key-value pairs to be
associated with the cluster.
:param timeout: An optional integer specifying the new operation
timeout value in seconds.
:return: A dictionary containing the details about the cluster and the
ID of the action triggered by this operation.
"""
# Get the database representation of the existing cluster
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
if cluster.status == cluster.ERROR:
msg = _('Updating a cluster in error state')
LOG.error(msg)
raise exception.FeatureNotSupported(feature=msg)
LOG.info(_LI("Updating cluster '%s'."), identity)
inputs = {}
if profile_id is not None:
old_profile = self.profile_find(context, cluster.profile_id)
try:
new_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile '%s' is not found."
) % profile_id
raise exception.BadRequest(msg=msg)
if new_profile.type != old_profile.type:
msg = _('Cannot update a cluster to a different profile type, '
'operation aborted.')
raise exception.ProfileTypeNotMatch(message=msg)
if old_profile.id != new_profile.id:
inputs['new_profile_id'] = new_profile.id
if metadata is not None and metadata != cluster.metadata:
inputs['metadata'] = metadata
if timeout is not None:
timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)
inputs['timeout'] = timeout
if name is not None:
inputs['name'] = name
kwargs = {
'name': 'cluster_update_%s' % cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
action_id = action_mod.Action.create(context, cluster.id,
consts.CLUSTER_UPDATE, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster update action queued: %s."), action_id)
resp = cluster.to_dict()
resp['action'] = action_id
return resp
@request_context
def cluster_delete(self, context, identity):
"""Delete the specified cluster.
:param identity: The UUID, name or short-ID of the target cluster.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI('Deleting cluster %s'), identity)
db_cluster = self.cluster_find(context, identity)
policies = db_api.cluster_policy_get_all(context, db_cluster.id)
if len(policies) > 0:
msg = _('Cluster %(id)s cannot be deleted without having all '
'policies detached.') % {'id': identity}
LOG.error(msg)
raise exception.BadRequest(msg=msg)
receivers = db_api.receiver_get_all(context, filters={'cluster_id':
db_cluster.id})
if len(receivers) > 0:
msg = _('Cluster %(id)s cannot be deleted without having all '
'receivers deleted.') % {'id': identity}
LOG.error(msg)
raise exception.BadRequest(msg=msg)
params = {
'name': 'cluster_delete_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_DELETE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster delete action queued: %s"), action_id)
return {'action': action_id}
@request_context
def cluster_add_nodes(self, context, identity, nodes):
"""Add specified nodes to the specified cluster.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the target cluster.
:param nodes: A list of node identities where each item is the UUID,
name or short-id of a node.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Adding nodes '%(nodes)s' to cluster '%(cluster)s'."),
{'cluster': identity, 'nodes': nodes})
db_cluster = self.cluster_find(context, identity)
db_cluster_profile = self.profile_find(context,
db_cluster.profile_id)
cluster_profile_type = db_cluster_profile.type
found = []
not_found = []
bad_nodes = []
owned_nodes = []
not_match_nodes = []
for node in nodes:
try:
db_node = self.node_find(context, node)
# Skip node in the same cluster already
if db_node.status != node_mod.Node.ACTIVE:
bad_nodes.append(db_node.id)
elif len(db_node.cluster_id) != 0:
owned_nodes.append(db_node.id)
else:
# check profile type matching
db_node_profile = self.profile_find(context,
db_node.profile_id)
node_profile_type = db_node_profile.type
if node_profile_type != cluster_profile_type:
not_match_nodes.append(db_node.id)
else:
found.append(db_node.id)
except exception.NodeNotFound:
not_found.append(node)
pass
error = None
if len(not_match_nodes) > 0:
error = _("Profile type of nodes %s does not match that of the "
"cluster.") % not_match_nodes
LOG.error(error)
raise exception.ProfileTypeNotMatch(message=error)
elif len(owned_nodes) > 0:
error = _("Nodes %s already owned by some cluster.") % owned_nodes
LOG.error(error)
raise exception.NodeNotOrphan(message=error)
elif len(bad_nodes) > 0:
error = _("Nodes are not ACTIVE: %s.") % bad_nodes
elif len(not_found) > 0:
error = _("Nodes not found: %s.") % not_found
elif len(found) == 0:
error = _("No nodes to add: %s.") % nodes
if error is not None:
LOG.error(error)
raise exception.BadRequest(msg=error)
target_size = db_cluster.desired_capacity + len(found)
error = su.check_size_params(db_cluster, target_size, strict=True)
if error:
LOG.error(error)
raise exception.BadRequest(msg=error)
params = {
'name': 'cluster_add_nodes_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {'nodes': found},
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_ADD_NODES,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster add nodes action queued: %s."), action_id)
return {'action': action_id}
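# Usage sketch (hypothetical identities): the nodes must exist, be ACTIVE,
# be orphans and share the cluster's profile type, as checked above.
#   self.cluster_add_nodes(ctx, 'web', ['node-1', 'node-2'])
#   -> {'action': '<ID of the queued CLUSTER_ADD_NODES action>'}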
@request_context
def cluster_del_nodes(self, context, identity, nodes):
"""Delete specified nodes from the named cluster.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the cluster.
:param nodes: A list containing the identities of the nodes to delete.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Deleting nodes '%(nodes)s' from cluster '%(cluster)s'."),
{'cluster': identity, 'nodes': nodes})
db_cluster = self.cluster_find(context, identity)
found = []
not_found = []
bad_nodes = []
for node in nodes:
try:
db_node = self.node_find(context, node)
if db_node.cluster_id != db_cluster.id:
bad_nodes.append(db_node.id)
else:
found.append(db_node.id)
except exception.NodeNotFound:
not_found.append(node)
pass
error = None
if len(not_found):
error = _("Nodes not found: %s.") % not_found
elif len(bad_nodes):
error = _("Nodes not members of specified cluster: "
"%s.") % bad_nodes
elif len(found) == 0:
error = _("No nodes specified.")
if error is not None:
LOG.error(error)
raise exception.BadRequest(msg=error)
target_size = db_cluster.desired_capacity - len(found)
error = su.check_size_params(db_cluster, target_size, strict=True)
if error:
LOG.error(error)
raise exception.BadRequest(msg=error)
params = {
'name': 'cluster_del_nodes_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {
'candidates': found,
'count': len(found),
},
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_DEL_NODES,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster delete nodes action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_resize(self, context, identity, adj_type=None, number=None,
min_size=None, max_size=None, min_step=None,
strict=True):
"""Adjust cluster size parameters.
:param identity: The cluster identity, which can be a name, ID or short ID;
:param adj_type: optional; if specified, must be one of the strings
defined in consts.ADJUSTMENT_TYPES;
:param number: number for adjustment. It is interpreted as the new
desired_capacity of the cluster if `adj_type` is set
to `EXACT_CAPACITY`; it is interpreted as the relative
number of nodes to add/remove when `adj_type` is set
to `CHANGE_IN_CAPACITY`; it is treated as a percentage
when `adj_type` is set to `CHANGE_IN_PERCENTAGE`.
This parameter is optional.
:param min_size: new lower bound of the cluster size, if specified.
This parameter is optional.
:param max_size: new upper bound of the cluster size, if specified;
A value of negative means no upper limit is imposed.
This parameter is optional.
:param min_step: optional. It specifies the number of nodes to be
added or removed when `adj_type` is set to value
`CHANGE_IN_PERCENTAGE` and the number calculated is
less than 1.
:param strict: optional boolean value. It specifies whether Senlin
should try a best-effort style resizing or just
reject the request when scaling beyond its current
size constraint.
:return: A dict containing the ID of an action fired.
"""
# check adj_type
if adj_type is not None:
if adj_type not in consts.ADJUSTMENT_TYPES:
raise exception.InvalidParameter(
name=consts.ADJUSTMENT_TYPE, value=adj_type)
if number is None:
msg = _('Missing number value for size adjustment.')
raise exception.BadRequest(msg=msg)
else:
if number is not None:
msg = _('Missing adjustment_type value for size adjustment.')
raise exception.BadRequest(msg=msg)
if adj_type == consts.EXACT_CAPACITY:
number = utils.parse_int_param(consts.ADJUSTMENT_NUMBER, number)
elif adj_type == consts.CHANGE_IN_CAPACITY:
number = utils.parse_int_param(consts.ADJUSTMENT_NUMBER, number,
allow_negative=True)
elif adj_type == consts.CHANGE_IN_PERCENTAGE:
try:
number = float(number)
except ValueError:
raise exception.InvalidParameter(name=consts.ADJUSTMENT_NUMBER,
value=number)
# min_step is only used (so checked) for this case
if min_step is not None:
min_step = utils.parse_int_param(consts.ADJUSTMENT_MIN_STEP,
min_step)
if min_size is not None:
min_size = utils.parse_int_param(consts.ADJUSTMENT_MIN_SIZE,
min_size)
if max_size is not None:
max_size = utils.parse_int_param(consts.ADJUSTMENT_MAX_SIZE,
max_size, allow_negative=True)
if strict is not None:
strict = utils.parse_bool_param(consts.ADJUSTMENT_STRICT, strict)
db_cluster = self.cluster_find(context, identity)
current = db_cluster.desired_capacity
if adj_type is not None:
desired = su.calculate_desired(current, adj_type, number, min_step)
else:
desired = None
res = su.check_size_params(db_cluster, desired, min_size, max_size,
strict)
if res:
raise exception.BadRequest(msg=res)
fmt = _LI("Resizing cluster '%(cluster)s': type=%(adj_type)s, "
"number=%(number)s, min_size=%(min_size)s, "
"max_size=%(max_size)s, min_step=%(min_step)s, "
"strict=%(strict)s.")
LOG.info(fmt, {'cluster': identity, 'adj_type': adj_type,
'number': number, 'min_size': min_size,
'max_size': max_size, 'min_step': min_step,
'strict': strict})
params = {
'name': 'cluster_resize_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {
consts.ADJUSTMENT_TYPE: adj_type,
consts.ADJUSTMENT_NUMBER: number,
consts.ADJUSTMENT_MIN_SIZE: min_size,
consts.ADJUSTMENT_MAX_SIZE: max_size,
consts.ADJUSTMENT_MIN_STEP: min_step,
consts.ADJUSTMENT_STRICT: strict
}
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_RESIZE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster resize action queued: %s."), action_id)
return {'action': action_id}
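# Example call (hypothetical values): grow a cluster by 20 percent, adding
# at least one node even when the percentage works out to less than one:
#   self.cluster_resize(ctx, 'web',
#                       adj_type=consts.CHANGE_IN_PERCENTAGE,
#                       number=20, min_step=1, strict=True)
# For a 10-node cluster this requests a desired capacity of 12; the exact
# rounding is delegated to su.calculate_desired() above.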
@request_context
def cluster_scale_out(self, context, identity, count=None):
"""Inflate the size of a cluster by then given number (optional).
:param context: Request context for the call.
:param identity: The name, ID or short ID of a cluster.
:param count: The number of nodes to add to the cluster. When omitted,
a policy gets a chance to decide the count number. When specified,
a policy would have to respect this input.
:return: A dict with the ID of the action fired.
"""
# Validation
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
metadata = cluster.to_dict()['metadata']
host_cluster = metadata.get('host_cluster', None)
candidate_hosts = []
if host_cluster:
host_cluster = self.cluster_get(context, host_cluster)
candidate_nodes = host_cluster['nodes']
host_nodes = metadata.get('host_nodes', None)
if host_nodes and candidate_nodes:
for node in candidate_nodes:
if node not in host_nodes:
candidate_hosts.append(node)
if candidate_hosts:
metadata.update(candidate_hosts=candidate_hosts)
cluster.metadata = metadata
cluster.store(context)
if count is not None:
count = utils.parse_int_param('count', count, allow_zero=False)
err = su.check_size_params(db_cluster,
db_cluster.desired_capacity + count)
if err:
raise exception.BadRequest(msg=err)
LOG.info(_LI('Scaling out cluster %(name)s by %(delta)s nodes'),
{'name': identity, 'delta': count})
inputs = {'count': count}
else:
LOG.info(_LI('Scaling out cluster %s'), db_cluster.name)
inputs = {}
params = {
'name': 'cluster_scale_out_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
if candidate_hosts:
params.update(candidate_hosts=candidate_hosts)
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_SCALE_OUT,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster Scale out action queued: %s"), action_id)
return {'action': action_id}
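# Placement note for the container extension above: candidate_hosts is the
# subset of the host cluster's nodes not already listed in
# metadata['host_nodes'] (it stays empty when 'host_nodes' is not recorded);
# it is written back into the cluster metadata and passed along with the
# CLUSTER_SCALE_OUT action parameters.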
@request_context
def cluster_scale_in(self, context, identity, count=None):
"""Deflate the size of a cluster by given number (optional).
:param context: Request context for the call.
:param identity: The name, ID or short ID of a cluster.
:param count: The number of nodes to remove from the cluster. When
omitted, a policy gets a chance to decide the count number. When
specified, a policy would have to respect this input.
:return: A dict with the ID of the action fired.
"""
db_cluster = self.cluster_find(context, identity)
if count is not None:
count = utils.parse_int_param('count', count, allow_zero=False)
err = su.check_size_params(db_cluster,
db_cluster.desired_capacity - count)
if err:
raise exception.BadRequest(msg=err)
LOG.info(_LI('Scaling in cluster %(name)s by %(delta)s nodes'),
{'name': identity, 'delta': count})
inputs = {'count': count}
else:
LOG.info(_LI('Scaling in cluster %s'), db_cluster.name)
inputs = {}
params = {
'name': 'cluster_scale_in_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_SCALE_IN,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster Scale in action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_check(self, context, identity, params=None):
"""Check the status of a cluster.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a cluster.
:param params: A dictionary containing additional parameters for
the check operation.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Checking Cluster '%(cluster)s'."),
{'cluster': identity})
db_cluster = self.cluster_find(context, identity)
params = {
'name': 'cluster_check_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_CHECK, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster check action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_recover(self, context, identity, params=None):
"""Recover a cluster to a healthy status.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a cluster.
:param params: A dictionary containing additional parameters for
the recover operation.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Recovering cluster '%s'."), identity)
db_cluster = self.cluster_find(context, identity)
params = {
'name': 'cluster_recover_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_RECOVER, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster recover action queued: %s."), action_id)
return {'action': action_id}
def node_find(self, context, identity, project_safe=True):
"""Find a node with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a node.
:param project_safe: A boolean indicating whether only nodes from the
same project as the requesting one are qualified
to be returned.
:return: A DB object of Node or an exception of `NodeNotFound` if no
matching object is found.
"""
if uuidutils.is_uuid_like(identity):
node = db_api.node_get(context, identity,
project_safe=project_safe)
if not node:
node = db_api.node_get_by_name(context, identity,
project_safe=project_safe)
else:
node = db_api.node_get_by_name(context, identity,
project_safe=project_safe)
if not node:
node = db_api.node_get_by_short_id(
context, identity, project_safe=project_safe)
if node is None:
raise exception.NodeNotFound(node=identity)
return node
@request_context
def node_list(self, context, cluster_id=None, filters=None, sort=None,
limit=None, marker=None, project_safe=True):
"""List node records matching the specified criteria.
:param context: An instance of the request context.
:param cluster_id: An optional parameter specifying the ID of the
cluster from which nodes are chosen.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the node after which the result
list starts.
:param project_safe: A boolean indicating whether nodes from all
projects will be returned.
:return: A list of `Node` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.NODE_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
# Maybe the cluster_id is a name or a short ID
if cluster_id:
db_cluster = self.cluster_find(context, cluster_id)
cluster_id = db_cluster.id
nodes = node_mod.Node.load_all(context, cluster_id=cluster_id,
limit=limit, marker=marker, sort=sort,
filters=filters,
project_safe=project_safe)
return [node.to_dict() for node in nodes]
@request_context
def node_create(self, context, name, profile_id, cluster_id=None,
role=None, metadata=None, host=None, container_name=None):
"""Create a node with provided properties.
:param context: An instance of the request context.
:param name: Name for the node to be created.
:param profile_id: The ID, name or short-id of the profile to be used.
:param cluster_id: The ID, name or short-id of the cluster in which
the new node will be a member. This could be None
if the node is to be an orphan node.
:param role: The role for the node to play in the cluster.
:param metadata: A dictionary containing the key-value pairs to be
associated with the node.
:param host: An optional identity of an existing node; its ID and IP
    address are recorded in the new node's metadata ('host_node' and
    'host_ip').
:param container_name: An optional container name recorded in the new
    node's metadata ('container_name').
:return: A dictionary containing the details about the node to be
created along with the ID of the action triggered by this
request.
"""
if cfg.CONF.name_unique:
if db_api.node_get_by_name(context, name):
msg = _("The node named (%(name)s) already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating node '%s'."), name)
if cluster_id is None:
cluster_id = ''
try:
node_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile (%s) is not found.") % profile_id
raise exception.BadRequest(msg=msg)
index = -1
if cluster_id:
try:
db_cluster = self.cluster_find(context, cluster_id)
except exception.ClusterNotFound:
msg = _("The specified cluster (%s) is not found."
) % cluster_id
raise exception.BadRequest(msg=msg)
cluster_id = db_cluster.id
if node_profile.id != db_cluster.profile_id:
cluster_profile = self.profile_find(context,
db_cluster.profile_id)
if node_profile.type != cluster_profile.type:
msg = _('Node and cluster have different profile type, '
'operation aborted.')
LOG.error(msg)
raise exception.ProfileTypeNotMatch(message=msg)
index = db_api.cluster_next_index(context, cluster_id)
# Create a node instance
metadata = metadata or {}
if host:
host_node = self.node_find(context, host)
host_ip = self.get_host_ip(context, host)
metadata.update(host_ip=host_ip)
metadata.update(host_node=host_node.id)
if container_name:
metadata.update(container_name=container_name)
kwargs = {
'index': index,
'role': role,
'metadata': metadata or {},
'user': context.user,
'project': context.project,
'domain': context.domain,
}
node = node_mod.Node(name, node_profile.id, cluster_id, context,
**kwargs)
node.store(context)
params = {
'name': 'node_create_%s' % node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, node.id,
consts.NODE_CREATE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node create action queued: %s."), action_id)
result = node.to_dict()
result['action'] = action_id
return result
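# Example call (hypothetical identities, container profile assumed): create
# a container node placed on an existing host node; the host's ID and IP
# are recorded in the node metadata as done above.
#   self.node_create(ctx, 'web-c1', 'container-profile', cluster_id='web',
#                    metadata={}, host='host-node-1',
#                    container_name='web-c1')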
@request_context
def node_get(self, context, identity, show_details=False):
"""Get the details about a node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a node.
:param show_details: Optional parameter indicating whether the details
about the physical object should be returned.
:return: A dictionary containing the detailed information about a node
or an exception of `NodeNotFound` if no matching node could
be found.
"""
db_node = self.node_find(context, identity)
node = node_mod.Node.load(context, node=db_node)
res = node.to_dict()
if show_details and node.physical_id:
res['details'] = node.get_details(context)
return res
@request_context
def node_update(self, context, identity, name=None, profile_id=None,
role=None, metadata=None):
"""Update a node with new propertye values.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the node.
:param name: Optional string specifying the new name for the node.
:param profile_id: The UUID, name or short-id of the new profile to
be used.
:param role: The new role for the node, if specified.
:param metadata: A dictionary of key-value pairs to be associated with
the node.
:return: A dictionary containing the updated representation of the
node along with the ID of the action triggered by this
request.
"""
LOG.info(_LI("Updating node '%s'."), identity)
db_node = self.node_find(context, identity)
if profile_id:
try:
db_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile (%s) is not found."
) % profile_id
raise exception.BadRequest(msg=msg)
profile_id = db_profile.id
# check if profile_type matches
old_profile = self.profile_find(context, db_node.profile_id)
if old_profile.type != db_profile.type:
msg = _('Cannot update a node to a different profile type, '
'operation aborted.')
LOG.error(msg)
raise exception.ProfileTypeNotMatch(message=msg)
inputs = {'new_profile_id': profile_id}
else:
inputs = {}
if name is not None and name != db_node.name:
inputs['name'] = name
if role is not None and role != db_node.role:
inputs['role'] = role
if metadata is not None and metadata != db_node.metadata:
inputs['metadata'] = metadata
if inputs == {}:
msg = _("No property needs an update.")
raise exception.BadRequest(msg=msg)
params = {
'name': 'node_update_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_UPDATE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node update action is queued: %s."), action_id)
node = node_mod.Node.load(context, node=db_node)
resp = node.to_dict()
resp['action'] = action_id
return resp
@request_context
def node_delete(self, context, identity, container_name=None):
"""Delete the specified node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the node.
:return: A dictionary containing the ID of the action triggered by
this request.
"""
LOG.info(_LI('Deleting node %s'), identity)
db_node = self.node_find(context, identity)
params = {
'name': 'node_delete_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_DELETE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node delete action is queued: %s."), action_id)
return {'action': action_id}
@request_context
def node_check(self, context, identity, params=None):
"""Check the health status of specified node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the node.
:param params: A dictionary providing additional input parameters
for the checking operation.
:return: A dictionary containing the ID of the action triggered by
this request.
"""
LOG.info(_LI("Checking node '%s'."), identity)
db_node = self.node_find(context, identity)
kwargs = {
'name': 'node_check_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params,
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_CHECK, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node check action is queued: %s."), action_id)
return {'action': action_id}
@request_context
def node_recover(self, context, identity, params=None):
"""Recover the specified node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a node.
:param params: A dictionary containing the optional parameters for
the requested recover operation.
:return: A dictionary containing the ID of the action triggered by the
recover request.
"""
LOG.info(_LI("Recovering node '%s'."), identity)
db_node = self.node_find(context, identity)
kwargs = {
'name': 'node_recover_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_RECOVER, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node recover action is queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_policy_list(self, context, identity, filters=None, sort=None):
"""List cluster-policy bindings given the cluster identity.
:param context: An instance of the request context.
:param identity: The ID, name or short ID of the target cluster.
:param filters: A list of key-value pairs for filtering out the result
list.
:param sort: A list of sorting keys (optionally appended with sorting
directions) separated by commas.
:return: A list containing dictionaries each representing a binding.
"""
utils.validate_sort_param(sort, consts.CLUSTER_POLICY_SORT_KEYS)
db_cluster = self.cluster_find(context, identity)
bindings = cluster_policy.ClusterPolicy.load_all(
context, db_cluster.id, filters=filters, sort=sort)
return [binding.to_dict() for binding in bindings]
@request_context
def cluster_policy_get(self, context, identity, policy_id):
"""Get the binding record giving the cluster and policy identity.
:param context: An instance of the request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy_id: The ID, name or short ID of the target policy.
:return: A dictionary containing the binding record, or raises an
exception of ``PolicyBindingNotFound`` if no binding record is found.
"""
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy_id)
try:
binding = cluster_policy.ClusterPolicy.load(
context, db_cluster.id, db_policy.id)
except exception.PolicyNotAttached:
raise exception.PolicyBindingNotFound(policy=policy_id,
identity=identity)
return binding.to_dict()
@request_context
def cluster_policy_attach(self, context, identity, policy, enabled=True):
"""Attach a policy to the specified cluster.
This is done via an action because a cluster lock is needed.
:param context: An instance of request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy: The ID, name or short ID of the target policy.
:param enabled: Optional parameter specifying whether the policy is
enabled when attached.
:return: A dictionary containing the ID of the action fired.
"""
LOG.info(_LI("Attaching policy (%(policy)s) to cluster "
"(%(cluster)s)."),
{'policy': policy, 'cluster': identity})
db_cluster = self.cluster_find(context, identity)
try:
db_policy = self.policy_find(context, policy)
except exception.PolicyNotFound:
msg = _("The specified policy (%s) is not found.") % policy
raise exception.BadRequest(msg=msg)
params = {
'name': 'attach_policy_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {
'policy_id': db_policy.id,
'enabled': utils.parse_bool_param('enabled', enabled),
}
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_ATTACH_POLICY,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Policy attach action queued: %s."), action_id)
return {'action': action_id}
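# Usage sketch (hypothetical identities):
#   self.cluster_policy_attach(ctx, 'web', 'scale-out-policy', enabled=True)
#   -> {'action': '<ID of the queued CLUSTER_ATTACH_POLICY action>'}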
@request_context
def cluster_policy_detach(self, context, identity, policy):
"""Detach a policy from the specified cluster.
This is done via an action because cluster lock is needed.
:param context: An instance of request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy: The ID, name or short ID of the target policy.
:return: A dictionary containing the ID of the action fired.
"""
LOG.info(_LI("Detaching policy '%(policy)s' from cluster "
"'%(cluster)s'."),
{'policy': policy, 'cluster': identity})
db_cluster = self.cluster_find(context, identity)
try:
db_policy = self.policy_find(context, policy)
except exception.PolicyNotFound:
msg = _("The specified policy (%s) is not found.") % policy
raise exception.BadRequest(msg=msg)
binding = db_api.cluster_policy_get(context, db_cluster.id,
db_policy.id)
if binding is None:
msg = _("The policy (%(p)s) is not attached to the specified "
"cluster (%(c)s).") % {'p': policy, 'c': identity}
raise exception.BadRequest(msg=msg)
params = {
'name': 'detach_policy_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {'policy_id': db_policy.id},
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_DETACH_POLICY,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Policy dettach action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_policy_update(self, context, identity, policy, enabled=None):
"""Update an existing policy binding on a cluster.
This is done via an action because cluster lock is needed.
:param context: An instance of request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy: The ID, name or short ID of the target policy.
:param enabled: Optional parameter specifying whether the policy is
enabled after the update.
:return: A dictionary containing the ID of the action fired.
"""
LOG.info(_LI("Updating policy '%(policy)s' on cluster '%(cluster)s.'"),
{'policy': policy, 'cluster': identity})
db_cluster = self.cluster_find(context, identity)
try:
db_policy = self.policy_find(context, policy)
except exception.PolicyNotFound:
msg = _("The specified policy (%s) is not found.") % policy
raise exception.BadRequest(msg=msg)
binding = db_api.cluster_policy_get(context, db_cluster.id,
db_policy.id)
if binding is None:
msg = _("The policy (%(p)s) is not attached to the specified "
"cluster (%(c)s).") % {'p': policy, 'c': identity}
raise exception.BadRequest(msg=msg)
inputs = {'policy_id': db_policy.id}
if enabled is not None:
inputs['enabled'] = utils.parse_bool_param('enabled', enabled)
params = {
'name': 'update_policy_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_UPDATE_POLICY,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Policy update action queued: %s."), action_id)
return {'action': action_id}
def action_find(self, context, identity, project_safe=True):
"""Find an action with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an action.
:return: A DB object of action or an exception `ActionNotFound` if no
matching action is found.
"""
if uuidutils.is_uuid_like(identity):
action = db_api.action_get(context, identity,
project_safe=project_safe)
if not action:
action = db_api.action_get_by_name(context, identity,
project_safe=project_safe)
else:
action = db_api.action_get_by_name(context, identity,
project_safe=project_safe)
if not action:
action = db_api.action_get_by_short_id(
context, identity, project_safe=project_safe)
if not action:
raise exception.ActionNotFound(action=identity)
return action
@request_context
def action_list(self, context, filters=None, limit=None, marker=None,
sort=None, project_safe=True):
"""List action records matching the specified criteria.
:param context: An instance of the request context.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the action after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param project_safe: A boolean indicating whether actions from all
projects will be returned.
:return: A list of `Action` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.ACTION_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
results = action_mod.Action.load_all(context, filters=filters,
limit=limit, marker=marker,
sort=sort,
project_safe=project_safe)
return [a.to_dict() for a in results]
@request_context
def action_create(self, context, name, cluster, action, inputs=None):
"""Create an action with given details.
:param context: Request context instance.
:param name: Name of the action.
:param cluster: Name, ID or short ID of the targeted cluster.
:param action: String representation of the action.
:param inputs: Optional inputs for the action.
:return: A dict containing the action created.
"""
LOG.info(_LI("Creating action '%s'."), name)
target = self.cluster_find(context, cluster)
# Create an action instance
params = {
'name': name,
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs or {},
}
action_id = action_mod.Action.create(context, target.id, action,
**params)
# TODO(Anyone): Uncomment this to notify the dispatcher
# dispatcher.start_action(action_id=action.id)
LOG.info(_LI("Action '%(name)s' is created: %(id)s."),
{'name': name, 'id': action_id})
return {'action': action_id}
@request_context
def action_get(self, context, identity):
"""Get the details about specified action.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an action.
:return: A dictionary containing the details about an action, or an
exception `ActionNotFound` if no matching action is found.
"""
db_action = self.action_find(context, identity)
action = action_mod.Action.load(context, db_action=db_action)
return action.to_dict()
@request_context
def action_delete(self, context, identity):
"""Delete the specified action object.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an action object.
:return: None if deletion was successful, or an exception of type
`ResourceInUse`.
"""
db_action = self.action_find(context, identity)
LOG.info(_LI("Deleting action '%s'."), identity)
try:
action_mod.Action.delete(context, db_action.id)
except exception.ResourceBusyError:
raise exception.ResourceInUse(resource_type='action',
resource_id=db_action.id)
LOG.info(_LI("Action '%s' is deleted."), identity)
def receiver_find(self, context, identity, project_safe=True):
"""Find a receiver with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a receiver.
        :param project_safe: A boolean indicating whether receivers from
                             projects other than the requesting one can be
                             returned.
:return: A DB object of receiver or an exception `ReceiverNotFound`
                 if no matching receiver is found.
"""
if uuidutils.is_uuid_like(identity):
receiver = db_api.receiver_get(context, identity,
project_safe=project_safe)
if not receiver:
receiver = db_api.receiver_get_by_name(
context, identity, project_safe=project_safe)
else:
receiver = db_api.receiver_get_by_name(
context, identity, project_safe=project_safe)
if not receiver:
receiver = db_api.receiver_get_by_short_id(
context, identity, project_safe=project_safe)
if not receiver:
raise exception.ReceiverNotFound(receiver=identity)
return receiver
@request_context
def receiver_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List receivers matching the specified criteria.
:param context: An instance of the request context.
:param limit: An integer specifying the maximum number of objects to
return in a response.
        :param marker: A UUID specifying the receiver after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether receivers from all
projects will be returned.
:return: A list of `Receiver` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.RECEIVER_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
receivers = receiver_mod.Receiver.load_all(context, limit=limit,
marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
return [r.to_dict() for r in receivers]
@request_context
def receiver_create(self, context, name, type_name, cluster_id, action,
actor=None, params=None):
"""Create a receiver.
:param context: An instance of the request context.
:param name: Name of the receiver.
:param type_name: Name of the receiver type, subject to validation.
:param cluster_id: UUID, name or short-id of a cluster.
:param action: Name or ID of an action, currently only builtin action
names are supported.
:param actor: Future extension.
:param params: A dictionary containing key-value pairs as inputs to
the action.
:return: A dictionary containing the details about the receiver
created.
"""
if cfg.CONF.name_unique:
if db_api.receiver_get_by_name(context, name):
msg = _("A receiver named '%s' already exists.") % name
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating receiver %(n)s: \n"
" type: %(t)s\n cluster: %(c)s\n action: %(a)s."),
{'n': name, 't': type_name, 'c': cluster_id, 'a': action})
rtype = type_name.lower()
if rtype not in consts.RECEIVER_TYPES:
msg = _("Receiver type '%s' is not supported.") % rtype
raise exception.BadRequest(msg=msg)
# Check whether cluster identified by cluster_id does exist
cluster = None
try:
cluster = self.cluster_find(context, cluster_id)
except exception.ClusterNotFound:
msg = _("The referenced cluster '%s' is not found.") % cluster_id
raise exception.BadRequest(msg=msg)
# permission checking
if not context.is_admin and context.user != cluster.user:
raise exception.Forbidden()
# Check action name
if action not in consts.ACTION_NAMES:
msg = _("Illegal action '%s' specified.") % action
raise exception.BadRequest(msg=msg)
if action.lower().split('_')[0] != 'cluster':
msg = _("Action '%s' is not applicable to clusters.") % action
raise exception.BadRequest(msg=msg)
if not params:
params = {}
kwargs = {
'name': name,
'user': context.user,
'project': context.project,
'domain': context.domain,
'params': params
}
receiver = receiver_mod.Receiver.create(context, rtype, cluster,
action, **kwargs)
LOG.info(_LI("Receiver (%(n)s) is created: %(i)s."),
{'n': name, 'i': receiver.id})
return receiver.to_dict()
@request_context
def receiver_get(self, context, identity, project_safe=True):
"""Get the details about a receiver.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a receiver.
:param project_safe: Whether matching object from other projects can
be returned.
:return: A dictionary containing the details about a receiver or
an exception `ReceiverNotFound` if no matching object found.
"""
db_receiver = self.receiver_find(context, identity,
project_safe=project_safe)
receiver = receiver_mod.Receiver.load(context,
receiver_obj=db_receiver,
project_safe=project_safe)
return receiver.to_dict()
@request_context
def receiver_delete(self, context, identity):
"""Delete the specified receiver.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a receiver.
:return: None if successfully deleted the receiver or an exception of
`ReceiverNotFound` if the object could not be found.
"""
db_receiver = self.receiver_find(context, identity)
LOG.info(_LI("Deleting receiver %s."), identity)
db_api.receiver_delete(context, db_receiver.id)
LOG.info(_LI("Receiver %s is deleted."), identity)
@request_context
def webhook_trigger(self, context, identity, params=None):
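        """Trigger the action associated with a webhook receiver.
        :param context: An instance of the request context.
        :param identity: The UUID, name or short-id of the receiver.
        :param params: Optional parameters merged into the receiver's stored
                       params before the action is created.
        :return: A dictionary containing the ID of the action fired.
        """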
LOG.info(_LI("Triggering webhook (%s)."), identity)
receiver = self.receiver_find(context, identity)
try:
cluster = self.cluster_find(context, receiver.cluster_id)
except exception.ClusterNotFound:
msg = _("The referenced cluster (%s) is not found."
) % receiver.cluster_id
raise exception.BadRequest(msg=msg)
data = copy.deepcopy(receiver.params)
if params:
data.update(params)
kwargs = {
'name': 'webhook_%s' % receiver.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': data,
}
action_id = action_mod.Action.create(context, cluster.id,
receiver.action, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Webhook %(w)s' triggered with action queued: %(a)s."),
{'w': identity, 'a': action_id})
return {'action': action_id}
def event_find(self, context, identity, project_safe=True):
"""Find an event with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the event.
:param project_safe: A boolean specifying that only events from the
same project as the requesting one are qualified
to be returned.
:return: A dictionary containing the details of the event.
"""
event = None
if uuidutils.is_uuid_like(identity):
event = db_api.event_get(context, identity,
project_safe=project_safe)
if not event:
event = db_api.event_get_by_short_id(context, identity,
project_safe=project_safe)
if not event:
raise exception.EventNotFound(event=identity)
return event
@request_context
def event_list(self, context, filters=None, limit=None, marker=None,
sort=None, project_safe=True):
"""List event records matching the specified criteria.
:param context: An instance of the request context.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param limit: An integer specifying the maximum number of objects to
return in a response.
        :param marker: A UUID specifying the event after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param project_safe: A boolean indicating whether events from all
projects will be returned.
:return: A list of `Event` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.EVENT_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
if filters and consts.EVENT_LEVEL in filters:
value = filters.pop(consts.EVENT_LEVEL)
value = utils.parse_level_values(value)
if value is not None:
filters[consts.EVENT_LEVEL] = value
all_events = db_api.event_get_all(context, filters=filters,
limit=limit, marker=marker,
sort=sort, project_safe=project_safe)
results = [event.as_dict() for event in all_events]
return results
@request_context
def event_get(self, context, identity):
"""Get the details about a specified event.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an event.
:return: A dictionary containing the details about the event or an
exception of `EventNotFound` if no matching record could be
found.
"""
db_event = self.event_find(context, identity)
return db_event.as_dict()
def get_host_ip(self, context, host):
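        """Return the floating IP of the server backing the given node.
        The node is looked up, its details are loaded and the value of the
        'floating_ip' output, if present, is returned.
        """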
if host:
db_node = self.node_find(context, host)
physical_id = db_node.physical_id
if not physical_id:
return
node = node_mod.Node.load(context, node_id=db_node.id)
details = node.get_details(context)
for output in details.outputs:
if output['output_key'] == 'floating_ip':
server_ip = output['output_value']
return server_ip
@request_context
def container_list(self, context, limit, host):
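        """List the docker containers running on the given host.
        The host's floating IP is resolved via get_host_ip() and the docker
        daemon is contacted on TCP port 2375; each container record returned
        is annotated with the server IP under the 'Server' key.
        """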
server_ip = self.get_host_ip(context, host)
if server_ip:
url = 'tcp://' + server_ip + ':2375'
docker_cli = Client(base_url=url)
containers = docker_cli.containers(all=True)
for j in range(len(containers)):
containers[j]['Server'] = server_ip
return containers
| apache-2.0 | -2,164,811,942,441,370,400 | 41.933124 | 79 | 0.569212 | false |
shoopio/shoop | shuup_tests/admin/test_shipment_creator.py | 2 | 7442 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import re
import pytest
from bs4 import BeautifulSoup
from django import forms
from shuup.admin.form_modifier import FormModifier
from shuup.admin.modules.orders.views.shipment import (
OrderCreateShipmentView, ShipmentForm
)
from shuup.apps.provides import override_provides
from shuup.core.excs import NoShippingAddressException
from shuup.core.models import Order
from shuup.testing.factories import (
create_order_with_product, create_product, get_default_shop,
get_default_supplier
)
from shuup.testing.utils import apply_request_middleware
@pytest.mark.django_db
def test_shipment_creating_view_get(rf, admin_user):
shop = get_default_shop()
supplier = get_default_supplier()
product = create_product(sku="test-sku", shop=shop, supplier=supplier, default_price=3.33)
quantity = 1
order = create_order_with_product(
product, supplier, quantity=quantity, taxless_base_unit_price=1, shop=shop)
request = apply_request_middleware(rf.get("/"), user=admin_user)
view = OrderCreateShipmentView.as_view()
response = view(request, pk=order.pk, supplier_pk=supplier.pk).render()
assert response.status_code == 200
# Should contain supplier input and input for product
soup = BeautifulSoup(response.content)
assert soup.find("input", {"id": "id_q_%s" % product.pk})
@pytest.mark.django_db
def test_shipment_creating_view_post(rf, admin_user):
shop = get_default_shop()
supplier = get_default_supplier()
product = create_product(sku="test-sku", shop=shop, supplier=supplier, default_price=3.33)
order = create_order_with_product(product, supplier, quantity=1, taxless_base_unit_price=1, shop=shop)
data = {
"q_%s" % product.pk: 1
}
request = apply_request_middleware(rf.post("/", data=data), user=admin_user)
view = OrderCreateShipmentView.as_view()
response = view(request, pk=order.pk, supplier_pk=supplier.pk)
assert response.status_code == 302
# Order should have shipment
assert order.shipments.count() == 1
shipment = order.shipments.first()
assert shipment.supplier_id == supplier.id
assert shipment.products.count() == 1
assert shipment.products.first().product_id == product.id
@pytest.mark.django_db
def test_extending_shipment_with_extra_fields(rf, admin_user):
shop = get_default_shop()
supplier = get_default_supplier()
product = create_product(sku="test-sku", shop=shop, supplier=supplier, default_price=3.33)
quantity = 1
order = create_order_with_product(
product, supplier, quantity=quantity, taxless_base_unit_price=1, shop=shop)
extend_form_class = "shuup_tests.admin.test_shipment_creator.ShipmentFormModifierTest"
with override_provides(ShipmentForm.form_modifier_provide_key, [extend_form_class]):
request = apply_request_middleware(rf.get("/"), user=admin_user)
view = OrderCreateShipmentView.as_view()
response = view(request, pk=order.pk, supplier_pk=supplier.pk).render()
assert response.status_code == 200
# Should contain supplier input, input for product and input for phone
soup = BeautifulSoup(response.content)
assert soup.find("input", {"id": "id_q_%s" % product.pk})
assert soup.find("input", {"id": "id_phone"})
@pytest.mark.django_db
def test_extending_shipment_clean_hook(rf, admin_user):
shop = get_default_shop()
supplier = get_default_supplier()
product = create_product(sku="test-sku", shop=shop, supplier=supplier, default_price=3.33)
quantity = 1
order = create_order_with_product(
product, supplier, quantity=quantity, taxless_base_unit_price=1, shop=shop)
extend_form_class = "shuup_tests.admin.test_shipment_creator.ShipmentFormModifierTest"
with override_provides(ShipmentForm.form_modifier_provide_key, [extend_form_class]):
data = {
"q_%s" % product.pk: 1,
"phone": "911"
}
request = apply_request_middleware(rf.post("/", data=data), user=admin_user)
view = OrderCreateShipmentView.as_view()
response = view(request, pk=order.pk, supplier_pk=supplier.pk).render()
assert response.status_code == 200
soup = BeautifulSoup(response.content)
assert soup.body.findAll(text=re.compile("Phone number should start with country code!"))
@pytest.mark.django_db
def test_extending_shipment_form_valid_hook(rf, admin_user):
shop = get_default_shop()
supplier = get_default_supplier()
product = create_product(sku="test-sku", shop=shop, supplier=supplier, default_price=3.33)
quantity = 1
order = create_order_with_product(
product, supplier, quantity=quantity, taxless_base_unit_price=1, shop=shop)
extend_form_class = "shuup_tests.admin.test_shipment_creator.ShipmentFormModifierTest"
with override_provides(ShipmentForm.form_modifier_provide_key, [extend_form_class]):
phone_number = "+358911"
data = {
"q_%s" % product.pk: 1,
"phone": phone_number
}
request = apply_request_middleware(rf.post("/", data=data), user=admin_user)
view = OrderCreateShipmentView.as_view()
response = view(request, pk=order.pk, supplier_pk=supplier.pk)
assert response.status_code == 302
        # Order should now have shipment, but let's re-fetch it first
order = Order.objects.get(pk=order.pk)
assert order.shipments.count() == 1
shipment = order.shipments.first()
assert order.shipping_data.get(shipment.identifier).get("phone") == phone_number
assert shipment.supplier_id == supplier.id
assert shipment.products.count() == 1
assert shipment.products.first().product_id == product.id
class ShipmentFormModifierTest(FormModifier):
def get_extra_fields(self, order):
return [("phone", forms.CharField(label="Phone", max_length=64, required=False))]
def clean_hook(self, form):
cleaned_data = form.cleaned_data
phone = cleaned_data.get("phone")
if not phone.startswith("+"):
form.add_error("phone", "Phone number should start with country code!")
def form_valid_hook(self, form, shipment):
data = form.cleaned_data
if data.get("phone"):
shipping_data = shipment.order.shipping_data or {}
shipping_data[shipment.identifier] = {"phone": data.get("phone")}
shipment.order.shipping_data = shipping_data
shipment.order.save()
@pytest.mark.django_db
def test_shipment_creating_with_no_shipping_address(rf, admin_user):
shop = get_default_shop()
supplier = get_default_supplier()
product = create_product(sku="test-sku", shop=shop, supplier=supplier, default_price=3.33)
order = create_order_with_product(product, supplier, quantity=1, taxless_base_unit_price=1, shop=shop)
# remove shipping address
order.shipping_address = None
order.save()
with pytest.raises(NoShippingAddressException):
order.create_shipment_of_all_products()
# order should not have any shipments since it should have thrown an exception
assert order.shipments.count() == 0
| agpl-3.0 | 8,639,049,885,013,082,000 | 39.89011 | 106 | 0.688793 | false |
Joacchim/Comix | src/archive.py | 1 | 20550 | # coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile
import gtk
try:
from py7zlib import Archive7z
except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
The Extractor can be loaded with paths to archives (currently ZIP, tar,
or RAR archives) and a path to a destination directory. Once an archive
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
def __init__(self):
self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
" <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
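        """Parse the listing produced by '7z l -slt' from the file object
        <fd> and return the names of all non-directory members.
        """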
START = "----------"
names = []
started = False
item = {}
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
The second parameter, extracted allows a trick for the subarchive
managing : setting files as extracted, in order to avoid any blocking
wait on files not present in the original archive.
Note: Random access on gzip or bzip2 compressed tar archives is
        not a good idea. These formats are supported *only* for backwards
        compatibility. They are fine formats for some purposes, but should
not be used for scanned comic books. So, we cheat and ignore the
ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
return self._extracted.get(name, False)
def get_mime_type(self):
"""Return the mime type name of the extractor's current archive."""
return self._type
def stop(self):
"""Signal the extractor to stop extracting and kill the extracting
thread. Blocks until the extracting thread has terminated.
"""
self._stop = True
if self._setupped:
self._extract_thread.join()
        self._setupped = False
def extract(self):
"""Start extracting the files in the file list one by one using a
new thread. Every time a new file is extracted a notify() will be
signalled on the Condition that was returned by setup().
"""
self._extract_thread = threading.Thread(target=self._thread_extract)
self._extract_thread.setDaemon(False)
self._extract_thread.start()
def close(self):
"""Close any open file objects, need only be called manually if the
extract() method isn't called.
"""
if self._type == ZIP:
self._zfile.close()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile.close()
elif self._type == MOBI and self._mobifile is not None:
self._mobifile.close()
def _thread_extract(self):
"""Extract the files in the file list one by one."""
# Extract 7z and rar whole archive - if it SOLID - extract one file is SLOW
if self._type in (SEVENZIP,) and _7z_exec is not None:
cmd = [_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
if self._type in (RAR,) and _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
cmd = [_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
os.chdir(cwd)
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
else:
for name in self._files:
self._extract_file(name)
self.close()
def _extract_file(self, name):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if self._stop:
self.close()
sys.exit(0)
try:
if self._type in (ZIP, SEVENZIP):
dst_path = os.path.join(self._dst, name)
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
new = open(dst_path, 'wb')
if self._type == ZIP:
new.write(self._zfile.read(name, '-'))
elif self._type == SEVENZIP:
if Archive7z is not None:
new.write(self._szfile.getmember(name).read())
else:
if _7z_exec is not None:
proc = process.Process([_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src, name])
proc.spawn()
proc.wait()
else:
print('! Could not find 7Z file extractor.')
new.close()
elif self._type in (TAR, GZIP, BZIP2):
if os.path.normpath(os.path.join(self._dst, name)).startswith(
self._dst):
self._tfile.extract(name, self._dst)
else:
print('! Non-local tar member: {}\n'.format(name))
elif self._type == RAR:
if _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
proc = process.Process([_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src, name])
proc.spawn()
proc.wait()
os.chdir(cwd)
else:
print('! Could not find RAR file extractor.')
elif self._type == MOBI:
dst_path = os.path.join(self._dst, name)
self._mobifile.extract(name, dst_path)
except Exception:
# Better to ignore any failed extractions (e.g. from a corrupt
# archive) than to crash here and leave the main thread in a
# possible infinite block. Damaged or missing files *should* be
# handled gracefully by the main program anyway.
pass
self._condition.acquire()
self._extracted[name] = True
self._condition.notify()
self._condition.release()
def extract_file_io(self, chosen):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if os.path.exists(os.path.join(self._dst, chosen)):
            return cStringIO.StringIO(open(os.path.join(self._dst, chosen), 'rb').read())
if self._type == ZIP:
return cStringIO.StringIO(self._zfile.read(chosen))
elif self._type in [TAR, GZIP, BZIP2]:
return cStringIO.StringIO(self._tfile.extractfile(chosen).read())
elif self._type == RAR:
proc = process.Process([_rar_exec, 'p', '-inul', '-p-', '--',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
elif self._type == SEVENZIP:
if Archive7z is not None:
return cStringIO.StringIO(self._szfile.getmember(chosen).read())
elif _7z_exec is not None:
proc = process.Process([_7z_exec, 'e', '-bd', '-p-', '-so',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
class Packer(object):
"""Packer is a threaded class for packing files into ZIP archives.
It would be straight-forward to add support for more archive types,
but basically all other types are less well fitted for this particular
task than ZIP archives are (yes, really).
"""
def __init__(self, image_files, other_files, archive_path, base_name):
"""Setup a Packer object to create a ZIP archive at <archive_path>.
All files pointed to by paths in the sequences <image_files> and
<other_files> will be included in the archive when packed.
The files in <image_files> will be renamed on the form
"NN - <base_name>.ext", so that the lexical ordering of their
filenames match that of their order in the list.
The files in <other_files> will be included as they are,
assuming their filenames does not clash with other filenames in
the archive. All files are placed in the archive root.
"""
self._image_files = image_files
self._other_files = other_files
self._archive_path = archive_path
self._base_name = base_name
self._pack_thread = None
self._packing_successful = False
def pack(self):
"""Pack all the files in the file lists into the archive."""
self._pack_thread = threading.Thread(target=self._thread_pack)
self._pack_thread.setDaemon(False)
self._pack_thread.start()
def wait(self):
"""Block until the packer thread has finished. Return True if the
packer finished its work successfully.
"""
if self._pack_thread is not None:
self._pack_thread.join()
return self._packing_successful
def _thread_pack(self):
try:
zfile = zipfile.ZipFile(self._archive_path, 'w')
except Exception:
print('! Could not create archive {}'.format(self._archive_path))
return
used_names = []
pattern = '{{:0{}d}} - {}{{}}'.format(len(str(len(self._image_files))), self._base_name)
for i, path in enumerate(self._image_files):
filename = pattern.format(i + 1, os.path.splitext(path)[1])
try:
zfile.write(path, filename, zipfile.ZIP_STORED)
except Exception:
print('! Could not add file {} to add to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
for path in self._other_files:
filename = os.path.basename(path)
while filename in used_names:
filename = '_{}'.format(filename)
try:
zfile.write(path, filename, zipfile.ZIP_DEFLATED)
except Exception:
print('! Could not add file {} to add to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
zfile.close()
self._packing_successful = True
def archive_mime_type(path):
"""Return the archive type of <path> or None for non-archives."""
try:
if os.path.isfile(path):
if not os.access(path, os.R_OK):
return None
if zipfile.is_zipfile(path):
return ZIP
fd = open(path, 'rb')
magic = fd.read(4)
fd.seek(60)
magic2 = fd.read(8)
fd.close()
if tarfile.is_tarfile(path) and os.path.getsize(path) > 0:
if magic.startswith('BZh'):
return BZIP2
if magic.startswith('\037\213'):
return GZIP
return TAR
if magic == 'Rar!':
return RAR
if magic == '7z\xbc\xaf':
return SEVENZIP
if magic2 == 'BOOKMOBI':
return MOBI
except Exception:
print('! Error while reading {}'.format(path))
return None
def get_name(archive_type):
"""Return a text representation of an archive type."""
return {ZIP: _('ZIP archive'),
TAR: _('Tar archive'),
GZIP: _('Gzip compressed tar archive'),
BZIP2: _('Bzip2 compressed tar archive'),
RAR: _('RAR archive'),
SEVENZIP: _('7-Zip archive'),
MOBI: _('MobiPocket file'),
}[archive_type]
def get_archive_info(path):
"""Return a tuple (mime, num_pages, size) with info about the archive
at <path>, or None if <path> doesn't point to a supported archive.
"""
image_re = re.compile('\.(' + '|'.join(get_supported_format_extensions_preg()) + ')\s*$', re.I)
extractor = Extractor()
extractor.setup(path, None)
mime = extractor.get_mime_type()
if mime is None:
return None
files = extractor.get_files()
extractor.close()
num_pages = len(filter(image_re.search, files))
size = os.stat(path).st_size
return mime, num_pages, size
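# Example usage (hypothetical path):
#
#   info = get_archive_info('/tmp/comic.cbz')
#   if info is not None:
#       mime, num_pages, size = info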
def _get_rar_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('unrar', 'rar'):
if process.Process([command]).spawn() is not None:
return command
return None
def _get_7z_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('7z', '7za', '7zr'):
if process.Process([command]).spawn() is not None:
return command
return None
| gpl-2.0 | 2,014,588,338,973,242,000 | 37.555347 | 107 | 0.522238 | false |
debjyoti385/dartnews | crawling/newsParser.py | 1 | 2286 | from HTMLParser import HTMLParser
import sys, re,os
from os import listdir
import Levenshtein
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
f=open('latlongmapping.csv',"r")
L=[]
for line in f:
(lat,lng,address)=line.strip().split("|")
L.append((lat,lng,address))
f.close()
newsCount=0
if __name__=='__main__':
if len(sys.argv)<4:
print "Please provide <news webpages directory> <urlmapping file> <outputnewssummary file>"
sys.exit(1)
mypath=sys.argv[1]
urlmappingfile=open(sys.argv[2])
print 'calculating url mapping ...'
urlmapping={}
for line in urlmappingfile:
sp=line.strip().split(",")
urlmapping[sp[0]]=sp[1]
print 'url mapping calculated, starting parser...'
out=open(sys.argv[3],"w")
onlyfiles = [ os.path.join(mypath,f) for f in listdir(mypath) ]
fcount=0
for filepath in onlyfiles:
f=open(filepath)
content=f.read()
f.close()
headlineSearch=re.search('<h1[^<]*>(.*)</h1>',content)
headline=""
if headlineSearch:
headline=strip_tags(headlineSearch.group(1))
time=re.search('((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[^<]*IST)',content)
if time:
time=strip_tags(time.group(1))
else:
time=""
news=re.search('<div[\s]+class="Normal">(.*)</div>[\s]*<',content)
if news:
news=strip_tags(news.group(1))
else:
news=re.findall('<div [^<]*mod-articletext[^<]*>(.*)</div>[\w\s]*<',content)
newsstr=""
if news:
for n in news:
newsstr+=(" "+strip_tags(n))
news=newsstr
if news=='':
#print "Got empty news in",filepath
pass
if 'delhi' in headline.lower() or 'delhi' in news[:50].lower():
url=urlmapping[filepath.split("/")[-1]]
D={}
for (lat,lng,address) in L:
s=0
for keyword in address.split(",")[0:2]:
if keyword in news.lower():
s+=1
D[(lat,lng,address)]=s
entries=sorted(D,key=lambda x: D[x],reverse=True)
if entries!=[]:
print entries[0],news,s
#out.write(time+"\x01"+headline+'\x01'+news+"\x01"+url+"\n");
fcount+=1
if fcount%10000==0:
print 'Processed',fcount,'files'
out.close()
| bsd-2-clause | -7,346,165,303,894,431,000 | 25.581395 | 93 | 0.628171 | false |
ExaScience/smurff | data/jaak/make.py | 1 | 3536 | #!/usr/bin/env python
import smurff.matrix_io as mio
import urllib.request
import scipy.io as sio
import os
from hashlib import sha256
import smurff
urls = [
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-346targets.mm",
"10c3e1f989a7a415a585a175ed59eeaa33eff66272d47580374f26342cddaa88",
"chembl-IC50-346targets.mm",
),
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-compound-feat.mm",
"f9fe0d296272ef26872409be6991200dbf4884b0cf6c96af8892abfd2b55e3bc",
"chembl-IC50-compound-feat.mm",
),
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-compounds.csv",
"e8f045a67ee149c6100684e07920036de72583366596eb5748a79be6e3b96f7c",
"chembl-IC50-compounds.csv",
),
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-proteins-uniprot.csv",
"224b1b44abcab8448b023874f4676af30d64fe651754144f9cbdc67853b76ea8",
"chembl-IC50-proteins-uniprot.csv",
),
]
for url, expected_sha, output in urls:
if os.path.isfile(output):
actual_sha = sha256(open(output, "rb").read()).hexdigest()
if (expected_sha == actual_sha):
continue
print("download %s" % output)
urllib.request.urlretrieve(url, output)
ic50 = sio.mmread("chembl-IC50-346targets.mm")
feat = sio.mmread("chembl-IC50-compound-feat.mm")
ic50_100c = ic50.tocsr()[0:100,:]
ic50_100c_train, ic50_100c_test = smurff.make_train_test(ic50_100c, 0.2, 1234)
# 0,1 binary for probit
ic50_01 = ic50.copy()
ic50_01.data = (ic50_01.data >= 6) * 1.
# -1,+1
ic50_11 = ic50.copy()
ic50_11.data = ((ic50.data >= 6) * 2.) - 1.
feat_100 = feat.tocsr()[0:100,:]
feat_100 = feat_100[:,feat_100.getnnz(0)>0]
feat_100_dense = feat_100.todense()
generated_files = [
( "f0d2ad6cf8173a64e12b48821e683b642b593555c552f4abf1f10ba255af78fc", "chembl-IC50-100compounds-feat-dense.ddm", feat_100_dense,),
( "0dd148a0da1a11ce6c6c3847d0cc2820dc9c819868f964a653a0d42063ce5c42", "chembl-IC50-100compounds-feat.sdm", feat_100,),
( "973074474497b236bf75fecfe9cc17471783fd40dbdda158b81e0ebbb408d30b", "chembl-IC50-346targets-01.sdm", ic50_01,),
( "5d7c821cdce02b4315a98a94cba5747e82d423feb1a2158bf03a7640aa82625d", "chembl-IC50-346targets-100compounds.sdm", ic50_100c,),
( "c70dbc990a5190d1c5d83594259abf10da409d2ba853038ad8f0e36f76ab56a8", "chembl-IC50-346targets-100compounds-train.sdm", ic50_100c_train,),
( "b2d7f742f434e9b933c22dfd45fa28d9189860edd1e42a6f0a5477f6f6f7d122", "chembl-IC50-346targets-100compounds-test.sdm", ic50_100c_test,),
( "bcf5cee9702e318591b76f064859c1d0769158d0b0f5c44057392c2f9385a591", "chembl-IC50-346targets-11.sdm", ic50_11,),
( "1defd1c82ac3243ad60a23a753287df494d3b50f2fd5ff7f4a074182b07e3318", "chembl-IC50-346targets.sdm", ic50, ),
( "badfa23abb83e0b731e969e1117fd4269f2df16e1faf14eb54c53c60465e87f1", "chembl-IC50-compound-feat.sdm", feat, ),
]
for expected_sha, output, data in generated_files:
if os.path.isfile(output):
actual_sha = sha256(open(output, "rb").read()).hexdigest()
if (expected_sha == actual_sha):
continue
print("make %s" % output)
mio.write_matrix(output, data)
actual_sha = sha256(open(output, "rb").read()).hexdigest()
if (expected_sha != actual_sha):
print("Checksum mismatch for %s: expected %s, got %s" % (output, expected_sha, actual_sha))
| mit | 2,601,276,451,595,209,000 | 41.095238 | 145 | 0.680147 | false |
liorsion/django-backlinks | src/backlinks/pingback/client.py | 1 | 4452 | import re
import xmlrpclib
import urllib
from backlinks.exceptions import fault_code_to_client_error, \
BacklinkClientError, BacklinkClientRemoteError, \
BacklinkClientConnectionError, BacklinkClientServerDoesNotExist,\
BacklinkClientAccessDenied, BacklinkClientInvalidResponse
from backlinks.conf import settings
from backlinks.utils import url_reader
# See http://hixie.ch/specs/pingback/pingback#TOC2.3
PINGBACK_RE = re.compile(r'<link rel="pingback" href="(?P<pingback_url>[^"]+)" ?/?>')
# Override the user agent for xmlrpclib's ServerProxy
class BacklinksTransport(xmlrpclib.Transport):
user_agent = settings.USER_AGENT_STRING
class BacklinksSafeTransport(xmlrpclib.SafeTransport):
user_agent = settings.USER_AGENT_STRING
# Build a nice ServerProxy replacement that will use our transport classes
class BacklinksServerProxy(xmlrpclib.ServerProxy):
transport_class = BacklinksTransport
safe_transport_class = BacklinksSafeTransport
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0, use_datetime=0):
type, uri = urllib.splittype(uri)
if type not in ("http", "https"):
raise IOError("unsupported XML-RPC protocol")
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
if transport is None:
if type == "https":
transport = self.safe_transport_class(use_datetime=use_datetime)
else:
transport = self.transport_class(use_datetime=use_datetime)
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
self.__allow_none = allow_none
def __request(self, methodname, params):
# call a method on the remote server
request = xmlrpclib.dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<ServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
class PingbackClient(object):
"""
A client for the Pingback protocol.
"""
# Default proxy class
proxy_class = BacklinksServerProxy
def __init__(self, proxy_class=None):
self.proxy_class = proxy_class or self.proxy_class
def autodiscover(self, link, response):
"""
Determine the Pingback server URL for a given response for a resource.
"""
pingback_url = response.headers.getheader('x-pingback', None)
if not pingback_url:
match = PINGBACK_RE.search(response.body)
if match:
pingback_url = match.group('pingback_url')
return pingback_url
def ping(self, ping_url, target_url, source_url, verbose=False, *args, **kwargs):
"""
Attempt to ping a resource using the given Pingback server URL.
"""
try:
server = self.proxy_class(ping_url, verbose=verbose)
result = server.pingback.ping(source_url, target_url)
return True
except xmlrpclib.Fault, e:
exception_class = fault_code_to_client_error.get(int(e.faultCode),
BacklinkClientError)
raise exception_class(reason=e.faultString)
except xmlrpclib.ProtocolError, e:
if e.errcode == 404:
raise BacklinkClientServerDoesNotExist
elif e.errcode == 500:
raise BacklinkClientRemoteError
elif e.errcode in (401, 403):
raise BacklinkClientAccessDenied
raise BacklinkClientConnectionError(reason=e.errmsg)
except xmlrpclib.ResponseError, e:
raise BacklinkClientInvalidResponse(reason=e.message)
except Exception, e:
raise BacklinkClientError(reason=str(e))
# A default instance of the Pingback client for convenience.
default_client = PingbackClient()
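# A minimal usage sketch (illustrative only; the URLs below are hypothetical):
#
#   try:
#       default_client.ping('http://blog.example.com/pingback-server',
#                           'http://blog.example.com/some-post/',
#                           'http://mysite.example.com/my-post/')
#   except BacklinkClientError, e:
#       pass  # inspect/log the failure as appropriate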
| bsd-3-clause | -388,211,051,411,919,940 | 33.511628 | 85 | 0.621968 | false |
haystack/eyebrowse-server | notifications/views.py | 1 | 6176 | import datetime
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils import timezone
from annoying.decorators import render_to
from accounts.models import UserProfile
from api.models import EyeHistory, PopularHistoryInfo
from api.utils import humanize_time
from common.constants import EMPTY_SEARCH_MSG
from common.view_helpers import _template_values
from eyebrowse.log import logger
from live_stream.query_managers import profile_stat_gen
from live_stream.query_managers import online_user
from notifications.models import Notification
from notifications.models import NoticeType, NOTICE_MEDIA
from notifications.utils import notice_setting_for_user
from stats.models import FavData
@login_required
@render_to('notifications/notifications.html')
def notifications(request):
user = get_object_or_404(User, username=request.user.username)
userprof = UserProfile.objects.get(user=user)
confirmed = userprof.confirmed
if not confirmed:
return redirect('/consent')
empty_search_msg = EMPTY_SEARCH_MSG['notifications']
# stats
tot_time, item_count = profile_stat_gen(user)
fav_data = FavData.objects.get(user=user)
num_history = EyeHistory.objects.filter(user=user).count()
is_online = online_user(user=user)
following_users = user.profile.follows.all()
following_count = following_users.count()
follower_count = UserProfile.objects.filter(follows=user.profile).count()
notifications = notification_renderer(user, empty_search_msg)
nots = Notification.objects.filter(recipient=user, seen=False)
for n in nots:
n.seen = True
n.save()
template_dict = {
"username": user.username,
"following_count": following_count,
"follower_count": follower_count,
"is_online": is_online,
"num_history": num_history,
"notifications": notifications,
"tot_time": tot_time,
"item_count": item_count,
"fav_data": fav_data,
}
return _template_values(request,
page_title="notifications",
navbar='notify',
sub_navbar="subnav_data",
not_count=0,
**template_dict)
def notification_renderer(user, empty_search_msg):
notifications = Notification.objects.filter(
recipient=user).select_related().order_by('-date_created')
for notif in notifications:
if notif.notice_type.label != "new_follower":
pop = PopularHistoryInfo.objects.filter(url=notif.url)
if pop.exists():
notif.description = pop[0].description
notif.img_url = pop[0].img_url
notif.favIconUrl = pop[0].favIconUrl
notif.title = pop[0].title
notif.hum_date = humanize_time(
timezone.now() - notif.date_created)
else:
notif.description = None
template_dict = {'notifications': notifications,
'empty_search_msg': empty_search_msg, }
return render_to_string('notifications/notification_list.html', template_dict)
class NoticeSettingsView(TemplateView):
template_name = "notifications/notice_settings.html"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(NoticeSettingsView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['nav_account'] = 'active'
context['email_notifications'] = 'active'
context['user'] = request.user
context['page_title'] = 'Set Email Notifications'
return self.render_to_response(context)
@property
def scoping(self):
return None
def setting_for_user(self, notice_type, medium_id):
return notice_setting_for_user(
self.request.user,
notice_type,
medium_id,
scoping=self.scoping
)
def form_label(self, notice_type, medium_id):
return "setting-{0}-{1}".format(
notice_type.pk,
medium_id
)
def process_cell(self, label):
val = self.request.POST.get(label)
_, pk, medium_id = label.split("-")
notice_type = NoticeType.objects.get(pk=pk)
setting = self.setting_for_user(notice_type, medium_id)
if val == "on":
setting.send = True
else:
setting.send = False
setting.save()
def settings_table(self):
notice_types = NoticeType.objects.all()
table = []
for notice_type in notice_types:
row = []
for medium_id, medium_display in NOTICE_MEDIA:
setting = self.setting_for_user(notice_type, medium_id)
row.append((
self.form_label(notice_type, medium_id),
setting.send)
)
table.append({"notice_type": notice_type, "cells": row})
return table
def post(self, request, *args, **kwargs):
table = self.settings_table()
for row in table:
for cell in row["cells"]:
self.process_cell(cell[0])
return HttpResponseRedirect(request.POST.get("next_page", "."))
def get_context_data(self, **kwargs):
settings = {
"column_headers": [
medium_display
for _, medium_display in NOTICE_MEDIA
],
"rows": self.settings_table(),
}
context = super(NoticeSettingsView, self).get_context_data(**kwargs)
context.update({
"notice_types": NoticeType.objects.all(),
"notice_settings": settings
})
return context
| mit | 3,723,686,512,894,883,300 | 31.505263 | 82 | 0.622733 | false |
vanossj/pyAtlasBoneSegmentation | src/ICP.py | 1 | 22312 | '''
Created on Feb 15, 2012
@author: Jeff
'''
import numpy
import numpyTransform
from scipy.spatial import cKDTree as KDTree
# from scipy.spatial import Delaunay
from scipy.spatial.distance import cdist
import scipy.optimize
import time
from math import pi
from MatlabFunctions import MatlabFmincon
import nlopt
import sys
class ICP(object):
    '''
    Iterative Closest Point (ICP) registration of a data point cloud onto
    a model point cloud.
    '''
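    # A minimal usage sketch (illustrative only; modelPoints and dataPoints
    # are assumed to be Nx3 numpy arrays):
    #
    #   icp = ICP(modelPoints, dataPoints, maxIterations=15,
    #             matchingMethod='kdtree', minimizeMethod='point')
    #   transforms, err, times = icp.runICP()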
def __init__(self, modelPointCloud, dataPointCloud, **kwargs):
'''
Supported Signatures
modelPointCloud
The model point cloud is the base to which the data point cloud will be matched
dataPointCloud
The data point cloud is transformed so that it matches the model point cloud
Key Word Arguments:
maxIterations
maximum number of iterations to perform, default is 10
TODO: in the future provide an option to also account for minimum acceptable error
matchingMethod
'kdtree' Use a KD-Tree for nearest neighbor search {default}
'bruteforce' Use brute force for nearest neighbor search
minimizeMethod
'point' Use point to point minimization {default}
'plane' Use point to plane minimization
weightMethod
function that takes indices into the modelPointCloud and returns the weight of those indices
By default all points are weighted equally
modelDownsampleFactor
integer that represents uniform sampling of model point cloud
1 is no resampling, 2 is every other point, 3 is every third point...
dataDownsampleFactor
            integer that represents uniform sampling of data point cloud
1 is no resampling, 2 is every other point, 3 is every third point...
ICP Process is five steps
1: Input Filter
2: Match
3: Outlier Filter
4: Error Minimization
5: Check if error is less than limits
            yes: we are done
no: go back to step 2 with new transformation function
'''
self.startTime = time.time()
if 'modelDownsampleFactor' in kwargs and int(kwargs['modelDownsampleFactor']) > 1:
factor = int(kwargs['modelDownsampleFactor'])
temp = numpy.zeros(factor, dtype=numpy.bool)
temp[-1] = True
modelDownSampleIndices = numpy.tile(temp, (modelPointCloud.shape[0] / factor) + 1)[:modelPointCloud.shape[0]]
else:
modelDownSampleIndices = numpy.ones(modelPointCloud.shape[0], dtype=numpy.bool)
if 'dataDownsampleFactor' in kwargs and int(kwargs['dataDownsampleFactor']) > 1:
factor = int(kwargs['dataDownsampleFactor'])
temp = numpy.zeros(factor, dtype=numpy.bool)
temp[-1] = True
dataDownSampleIndices = numpy.tile(temp, (dataPointCloud.shape[0] / factor) + 1)[:dataPointCloud.shape[0]]
else:
dataDownSampleIndices = numpy.ones(dataPointCloud.shape[0], dtype=numpy.bool)
# TODO: uniform sampling of point clouds
self.q = modelPointCloud[modelDownSampleIndices]
self.p = dataPointCloud[dataDownSampleIndices]
self.matlab = None
# get kwargs
if 'maxIterations' in kwargs:
self.K = int(kwargs['maxIterations'])
else:
self.K = 10
if 'matchingMethod' in kwargs:
if kwargs['matchingMethod'] == 'bruteforce':
self.matching = self.matchingBruteForce
else:
self.matching = self.matchingKDTree
self.qKDTree = KDTree(self.q)
else:
self.matching = self.matchingKDTree
self.qKDTree = KDTree(self.q)
if 'minimizeMethod' in kwargs:
if kwargs['minimizeMethod'] == 'plane': # point to plane
self.minimize = self.minimizePlane
elif kwargs['minimizeMethod'] == 'fmincon':
self.minimize = self.minimizeMatlab
self.matlab = MatlabFmincon()
elif kwargs['minimizeMethod'] == 'custom':
self.minimize = self.minimizeCustom
else: # point to point
self.minimize = self.minimizePoint
else:
self.minimize = self.minimizePoint
if 'weightMethod' in kwargs:
self.weightMethod = kwargs['weightMethod']
else:
self.weightMethod = self.weightEqual
# initialize translation and rotation matrix
self.transformMatrix = numpy.matrix(numpy.identity(4))
# initialize list of translations and rotation matrix for each iteration of ICP
self.totalTransformMatrix = [numpy.matrix(numpy.identity(4))]
self.pt = self.p.copy() # transformed point cloud
self.t = [] # array of times for each iteration of ICP
self.err = [] # error for each iteration of ICP
self.Np = self.p.shape[0] # number of points in data cloud
# preprocessing finish, log time
self.t.append(time.time() - self.startTime)
print 'Time for preprocessing:', self.t[-1]
def __del__(self):
if self.matlab is not None:
del self.matlab
def runICP(self, **kwargs):
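        """Run up to maxIterations ICP iterations.
        The keyword arguments x0, lb and ub (initial guess and bounds on the
        transformation parameters) are only used by the matlab/fmincon
        minimizer. Returns the list of cumulative transformation matrices,
        the RMS error per iteration and the elapsed time per iteration.
        """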
tStart = time.time()
# get 'global' tolerances
if 'x0' in kwargs:
kwargs['initX0'] = kwargs['x0'].copy()
if 'lb' in kwargs:
kwargs['initLB'] = kwargs['lb'].copy()
if 'ub' in kwargs:
kwargs['initUB'] = kwargs['ub'].copy()
# main ICP loop
for k in xrange(self.K):
t1 = time.time()
minDistances, nearestNeighbor = self.matching(self.pt)
# get indices of the points we are interested in
p_idx = numpy.ones(self.p.shape[0], dtype=numpy.bool) # since there are no edges we are interested in all the points
q_idx = nearestNeighbor
print '\tTime to calc min distance:', time.time() - t1
# TODO: Input filtering
# reject some % of worst matches
# Multiresolution sampling
# add error for first iteration
if k == 0:
t1 = time.time()
self.err.append(numpy.sqrt(numpy.sum(minDistances ** 2) / minDistances.shape[0]))
print '\tInitial RMS error: %f, Time to calc: %f' % (self.err[-1], time.time() - t1)
# generate rotation matrix and translation
t1 = time.time()
weights = self.weightMethod(nearestNeighbor)
# get current cumulative rotation/translation in independent variable values, this way we can change the iteration bounds so that the global bounds are not violated
cummulativeX0 = numpy.zeros(9)
rotMat, tx, ty, tz, sx, sy, sz = numpyTransform.decomposeMatrix(self.totalTransformMatrix[-1])
rx, ry, rz = numpyTransform.rotationMat2Euler(rotMat)
cummulativeX0[0] = rx
cummulativeX0[1] = ry
cummulativeX0[2] = rz
cummulativeX0[3] = tx
cummulativeX0[4] = ty
cummulativeX0[5] = tz
cummulativeX0[6] = sx
cummulativeX0[7] = sy
cummulativeX0[8] = sz
R, T, S = self.minimize(self.q[q_idx], self.pt[p_idx], weights=weights, cummulativeX0=cummulativeX0, **kwargs)
print '\tTime to calc new transformation:', time.time() - t1
# create combined transformation matrix, apply this relative transformation to current transformation
transformMatrix = numpy.matrix(numpy.identity(4))
transformMatrix *= T
transformMatrix *= R
transformMatrix *= S
self.totalTransformMatrix.append(self.totalTransformMatrix[-1] * transformMatrix)
# apply last transformation
t1 = time.time()
self.pt = numpyTransform.transformPoints(self.totalTransformMatrix[-1], self.p)
print '\tTime to applying transform to all points:', time.time() - t1
# root mean of objective function
t1 = time.time()
self.err.append(self.rms_error(self.q[q_idx], self.pt[p_idx]))
print '\tIteration %d RMS error: %f, Time to calc: %f' % (k + 1, self.err[-1], time.time() - t1)
# TODO: add extrapolation
# store time to get to this iteration
self.t.append(time.time() - self.startTime)
print 'Iteration %d took %7.3f seconds' % (k + 1, self.t[-1] - self.t[-2])
print 'Total ICP run time:', time.time() - tStart
return self.totalTransformMatrix, self.err, self.t
def matchingKDTree(self, points):
minDistances, nearestNeighborIndex = self.qKDTree.query(points)
return minDistances, nearestNeighborIndex
def matchingBruteForce(self, points):
nearestNeighborIndex = numpy.zeros(points.shape[0])
distances = cdist(points, self.q) # calculate all combination of point distances
minDistances = distances.min(axis=1)
for i in xrange(points.shape[0]):
nearestNeighborIndex[i] = numpy.where(distances[i] == minDistances[i])[0][0]
return minDistances, nearestNeighborIndex
def minimizePoint(self, q, p, **kwargs):
R = numpy.matrix(numpy.identity(4))
T = numpy.matrix(numpy.identity(4))
S = numpy.matrix(numpy.identity(4))
if 'weights' in kwargs:
weights = kwargs['weights']
else:
raise Warning('weights argument not supplied')
# function [R,T] = eq_point(q,p,weights)
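        # The code below is a weighted point-to-point (Kabsch/SVD) solver:
        # compute the weighted centroids of both clouds, form the
        # cross-covariance matrix N from the centered point sets, take its
        # SVD and recover the rotation as V*U^T; the translation is then
        # q_bar - R*p_bar. The scale matrix S is left as identity.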
m = p.shape[0]
n = q.shape[0]
# normalize weights
weights = weights / weights.sum()
# find data centroid and deviations from centroid
q_bar = (numpy.mat(q.T) * numpy.mat(weights[:, numpy.newaxis])).getA().squeeze()
q_mark = q - numpy.tile(q_bar, n).reshape((n, 3))
# Apply weights
q_mark = q_mark * numpy.repeat(weights, 3).reshape((weights.shape[0], 3))
# find data centroid and deviations from centroid
p_bar = (numpy.mat(p.T) * numpy.mat(weights[:, numpy.newaxis])).getA().squeeze()
p_mark = p - numpy.tile(p_bar, m).reshape((m, 3))
# Apply weights
# p_mark = p_mark * numpy.repeat(weights, 3).reshape((weights.shape[0],3))
N = (numpy.mat(p_mark).T * numpy.mat(q_mark)).getA() # taking points of q in matched order
[U, Ss, V] = numpy.linalg.svd(N); # singular value decomposition
V = (numpy.mat(V).H).getA()
RMattemp = numpy.mat(V) * numpy.mat(U).T
Ttemp = (numpy.mat(q_bar).T - RMattemp * numpy.mat(p_bar).T).getA().squeeze()
R[:3, :3] = RMattemp.getA()
T = numpyTransform.translation(Ttemp)
return R, T, S
def minimizeMatlab(self, modelPoints, dataPoints, **kwargs):
if 'x0' in kwargs:
x0 = kwargs['x0']
else:
raise Exception('There are no variables to solve for')
# check for initial settings and bounds so that we can calculate current settings and bounds
if 'initX0' in kwargs:
initX0 = kwargs['initX0']
if 'initLB' in kwargs:
initLB = kwargs['initLB']
if 'initUB' in kwargs:
initUB = kwargs['initUB']
if 'cummulativeX0' in kwargs:
cummulativeX0 = kwargs['cummulativeX0']
            # NOTE: I think this only works if x0/initX0 is all zeros
ub = initUB - (cummulativeX0 - initX0)
lb = initLB - (cummulativeX0 - initX0)
# rounding errors can cause Bounds to be incorrect
i = ub < x0
if numpy.any(i):
print 'upper bounds less than x0'
ub[i] = x0[i] + 10 * numpy.spacing(x0[i])
i = lb > x0
if numpy.any(i):
                print 'lower bounds greater than x0'
lb[i] = x0[i] - 10 * numpy.spacing(x0[i])
# if x0.shape[0] > 6 or ('scaleOnly' in kwargs and kwargs['scaleOnly']):
# raise Exception('Scaling is not currently supported it will screw things up. Need some way to control scaling bounds so that it stays in global scaling bounds')
try:
if 'scaleOnly' in kwargs:
R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[-3:], lb[-3:], ub[-3:], scaleOnly=kwargs['scaleOnly'])
elif 'scaleOnlyIso' in kwargs:
R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[-1:], lb[-1:], ub[-1:], scaleOnlyIso=kwargs['scaleOnlyIso'])
else:
R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[:6], lb[:6], ub[:6]) # only rotation and translation
except:
sys.stderr.write('ERROR: Problem with matlab, closing matlab\n')
del self.matlab
self.matlab = None
return R, T, S
def minimizeCustom(self, p, q, **kwargs):
S = numpy.matrix(numpy.identity(4))
# TODO: try using functions from the nlopt module
def objectiveFunc(*args, **kwargs):
d = p
m = q
params = args[0]
if args[1].size > 0: # gradient
args[1][:] = numpy.array([pi / 100, pi / 100, pi / 100, 0.01, 0.01, 0.01]) # arbitrary gradient
# transform = numpy.matrix(numpy.identity(4))
translate = numpyTransform.translation(params[3:6])
rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
transform = translate * rotx * roty * rotz
Dicp = numpyTransform.transformPoints(transform, d)
# err = self.rms_error(m, Dicp)
err = numpy.mean(numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1)))
# err = numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1))
return err
x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
if 'optAlg' in kwargs:
opt = nlopt.opt(kwargs['optAlg'], 6)
else:
opt = nlopt.opt(nlopt.GN_CRS2_LM, 6)
opt.set_min_objective(objectiveFunc)
opt.set_lower_bounds([-pi, -pi, -pi, -3.0, -3.0, -3.0])
opt.set_upper_bounds([pi, pi, pi, 3.0, 3.0, 3.0])
opt.set_maxeval(1500)
params = opt.optimize(x0)
# output = scipy.optimize.leastsq(objectiveFunc, x0, args=funcArgs)
# params = output[0]
# params = scipy.optimize.fmin(objectiveFunc, x0, args=funcArgs)
# constraints = []
# varBounds = [(-pi, pi), (-pi, pi), (-pi, pi), (-3.0, 3.0), (-3.0, 3.0), (-3.0, 3.0)]
# params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
# output = scipy.optimize.fmin_l_bfgs_b(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
# params = output[0]
# print 'Min error:', output[1]
# params = scipy.optimize.fmin_tnc(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
# params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
# params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
translate = numpyTransform.translation(params[3:6])
rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
transform = translate * rotx * roty * rotz
        return rotx * roty * rotz, translate, S
def minimizePlane(self, p, q, **kwargs):
# TODO: Actually fill out
R = numpy.matrix(numpy.identity(4))
T = numpy.matrix(numpy.identity(4))
S = numpy.matrix(numpy.identity(4))
# function [R,T] = eq_plane(q,p,n,weights)
# n = n .* repmat(weights,3,1);
#
# c = cross(p,n);
#
# cn = vertcat(c,n);
#
# C = cn*transpose(cn);
#
# b = - [sum(sum((p-q).*repmat(cn(1,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(2,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(3,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(4,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(5,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(6,:),3,1).*n))];
#
# X = C\b;
#
# cx = cos(X(1)); cy = cos(X(2)); cz = cos(X(3));
# sx = sin(X(1)); sy = sin(X(2)); sz = sin(X(3));
#
# R = [cy*cz cz*sx*sy-cx*sz cx*cz*sy+sx*sz;
# cy*sz cx*cz+sx*sy*sz cx*sy*sz-cz*sx;
# -sy cy*sx cx*cy];
#
# T = X(4:6);
return R, T, S
def weightEqual(self, qIndices):
return numpy.ones(qIndices.shape[0])
def rms_error(self, a, b):
'''
        Determine the RMS error between two equally sized point clouds with point correspondence.
NOTE: a and b need to have equal number of points
'''
if a.shape[0] != b.shape[0]:
raise Exception('Input Point clouds a and b do not have the same number of points')
distSq = numpy.sum((a - b) ** 2, axis=1)
err = numpy.sqrt(numpy.mean(distSq))
return err
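# Worked example for ICP.rms_error above (illustrative, not from the original
# source): for a = [[0, 0, 0], [1, 0, 0]] and b = [[0, 0, 0], [1, 1, 0]] the
# squared point-to-point distances are [0, 1], so the RMS error is
# sqrt(mean([0, 1])) = sqrt(0.5) ~= 0.7071.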
def demo(*args, **kwargs):
import math
m = 80 # width of grid
n = m ** 2 # number of points
minVal = -2.0
maxVal = 2.0
delta = (maxVal - minVal) / (m - 1)
X, Y = numpy.mgrid[minVal:maxVal + delta:delta, minVal:maxVal + delta:delta]
X = X.flatten()
Y = Y.flatten()
Z = numpy.sin(X) * numpy.cos(Y)
# Create the data point-matrix
M = numpy.array([X, Y, Z]).T
# Translation values (a.u.):
Tx = 0.5
Ty = -0.3
Tz = 0.2
# Translation vector
T = numpyTransform.translation(Tx, Ty, Tz)
S = numpyTransform.scaling(1.0, N=4)
# Rotation values (rad.):
rx = 0.3
ry = -0.2
rz = 0.05
Rx = numpy.matrix([[1, 0, 0, 0],
[0, math.cos(rx), -math.sin(rx), 0],
[0, math.sin(rx), math.cos(rx), 0],
[0, 0, 0, 1]])
Ry = numpy.matrix([[math.cos(ry), 0, math.sin(ry), 0],
[0, 1, 0, 0],
[-math.sin(ry), 0, math.cos(ry), 0],
[0, 0, 0, 1]])
Rz = numpy.matrix([[math.cos(rz), -math.sin(rz), 0, 0],
[math.sin(rz), math.cos(rz), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# Rotation matrix
R = Rx * Ry * Rz
transformMat = numpy.matrix(numpy.identity(4))
transformMat *= T
transformMat *= R
transformMat *= S
# Transform data-matrix plus noise into model-matrix
D = numpyTransform.transformPoints(transformMat, M)
# Add noise to model and data
M = M + 0.01 * numpy.random.randn(n, 3)
D = D + 0.01 * numpy.random.randn(n, 3)
# Run ICP (standard settings)
initialGuess = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
lowerBounds = numpy.array([-pi, -pi, -pi, -100.0, -100.0, -100.0])
upperBounds = numpy.array([pi, pi, pi, 100.0, 100.0, 100.0])
icp = ICP(M, D, maxIterations=15, dataDownsampleFactor=1, minimizeMethod='fmincon', **kwargs)
# icp = ICP(M, D, maxIterations=15, dataDownsampleFactor=1, minimizeMethod='point', **kwargs)
transform, err, t = icp.runICP(x0=initialGuess, lb=lowerBounds, ub=upperBounds)
# Transform data-matrix using ICP result
Dicp = numpyTransform.transformPoints(transform[-1], D)
# Plot model points blue and transformed points red
if False:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.scatter(M[:, 0], M[:, 1], M[:, 2], c='r', marker='o')
ax.scatter(D[:, 0], D[:, 1], D[:, 2], c='b', marker='^')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.scatter(M[:, 0], M[:, 1], M[:, 2], c='r', marker='o')
ax.scatter(Dicp[:, 0], Dicp[:, 1], Dicp[:, 2], c='b', marker='^')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax = fig.add_subplot(2, 2, 3)
ax.plot(t, err, 'x--')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
plt.show()
else:
import visvis as vv
app = vv.use()
vv.figure()
vv.subplot(2, 2, 1)
vv.plot(M[:, 0], M[:, 1], M[:, 2], lc='b', ls='', ms='o')
vv.plot(D[:, 0], D[:, 1], D[:, 2], lc='r', ls='', ms='x')
vv.xlabel('[0,0,1] axis')
vv.ylabel('[0,1,0] axis')
vv.zlabel('[1,0,0] axis')
        vv.title('Blue: z=sin(x)*cos(y), red: transformed point cloud')
# Plot the results
vv.subplot(2, 2, 2)
vv.plot(M[:, 0], M[:, 1], M[:, 2], lc='b', ls='', ms='o')
vv.plot(Dicp[:, 0], Dicp[:, 1], Dicp[:, 2], lc='r', ls='', ms='x')
vv.xlabel('[0,0,1] axis')
vv.ylabel('[0,1,0] axis')
vv.zlabel('[1,0,0] axis')
vv.title('ICP result')
# Plot RMS curve
vv.subplot(2, 2, 3)
vv.plot(t, err, ls='--', ms='x')
vv.xlabel('time [s]')
vv.ylabel('d_{RMS}')
vv.title('KD-Tree matching')
if 'optAlg' in kwargs:
opt2 = nlopt.opt(kwargs['optAlg'], 2)
vv.title(opt2.get_algorithm_name())
del opt2
else:
vv.title('KD-Tree matching')
app.Run()
if __name__ == '__main__':
demo()
# demo2()
| mit | -712,446,531,366,426,000 | 37.842857 | 176 | 0.551273 | false |
YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/17-futures/countries/flags2_common.py | 1 | 5430 | """Utilities for second set of flag examples.
"""
import os
import time
import sys
import string
import argparse
from collections import namedtuple
from enum import Enum
Result = namedtuple('Result', 'status data')
HTTPStatus = Enum('Status', 'ok not_found error')
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
'MX PH VN ET EG DE IR TR CD FR').split()
DEFAULT_CONCUR_REQ = 1
MAX_CONCUR_REQ = 1
SERVERS = {
'REMOTE': 'http://flupy.org/data/flags',
'LOCAL': 'http://localhost:8001/flags',
'DELAY': 'http://localhost:8002/flags',
'ERROR': 'http://localhost:8003/flags',
}
DEFAULT_SERVER = 'LOCAL'
DEST_DIR = 'downloads/'
COUNTRY_CODES_FILE = 'country_codes.txt'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as fp:
fp.write(img)
def initial_report(cc_list, actual_req, server_label):
if len(cc_list) <= 10:
cc_msg = ', '.join(cc_list)
else:
cc_msg = 'from {} to {}'.format(cc_list[0], cc_list[-1])
print('{} site: {}'.format(server_label, SERVERS[server_label]))
msg = 'Searching for {} flag{}: {}'
plural = 's' if len(cc_list) != 1 else ''
print(msg.format(len(cc_list), plural, cc_msg))
plural = 's' if actual_req != 1 else ''
msg = '{} concurrent connection{} will be used.'
print(msg.format(actual_req, plural))
def final_report(cc_list, counter, start_time):
elapsed = time.time() - start_time
print('-' * 20)
msg = '{} flag{} downloaded.'
plural = 's' if counter[HTTPStatus.ok] != 1 else ''
print(msg.format(counter[HTTPStatus.ok], plural))
if counter[HTTPStatus.not_found]:
print(counter[HTTPStatus.not_found], 'not found.')
if counter[HTTPStatus.error]:
plural = 's' if counter[HTTPStatus.error] != 1 else ''
print('{} error{}.'.format(counter[HTTPStatus.error], plural))
print('Elapsed time: {:.2f}s'.format(elapsed))
def expand_cc_args(every_cc, all_cc, cc_args, limit):
codes = set()
A_Z = string.ascii_uppercase
if every_cc:
codes.update(a+b for a in A_Z for b in A_Z)
elif all_cc:
with open(COUNTRY_CODES_FILE) as fp:
text = fp.read()
codes.update(text.split())
else:
for cc in (c.upper() for c in cc_args):
if len(cc) == 1 and cc in A_Z:
codes.update(cc+c for c in A_Z)
elif len(cc) == 2 and all(c in A_Z for c in cc):
codes.add(cc)
else:
msg = 'each CC argument must be A to Z or AA to ZZ.'
raise ValueError('*** Usage error: '+msg)
return sorted(codes)[:limit]
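# Illustrative examples for expand_cc_args above (not part of the original
# module): a single letter expands to the full two-letter range for that
# letter, while two-letter codes are validated and upper-cased.
#   expand_cc_args(every_cc=False, all_cc=False, cc_args=['b'], limit=5)
#       -> ['BA', 'BB', 'BC', 'BD', 'BE']
#   expand_cc_args(every_cc=False, all_cc=False, cc_args=['br', 'c'], limit=3)
#       -> ['BR', 'CA', 'CB']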
def process_args(default_concur_req):
server_options = ', '.join(sorted(SERVERS))
parser = argparse.ArgumentParser(
description='Download flags for country codes. '
'Default: top 20 countries by population.')
parser.add_argument('cc', metavar='CC', nargs='*',
help='country code or 1st letter (eg. B for BA...BZ)')
parser.add_argument('-a', '--all', action='store_true',
help='get all available flags (AD to ZW)')
parser.add_argument('-e', '--every', action='store_true',
help='get flags for every possible code (AA...ZZ)')
parser.add_argument('-l', '--limit', metavar='N', type=int,
help='limit to N first codes', default=sys.maxsize)
parser.add_argument('-m', '--max_req', metavar='CONCURRENT', type=int,
default=default_concur_req,
help='maximum concurrent requests (default={})'
.format(default_concur_req))
parser.add_argument('-s', '--server', metavar='LABEL',
default=DEFAULT_SERVER,
help='Server to hit; one of {} (default={})'
.format(server_options, DEFAULT_SERVER))
parser.add_argument('-v', '--verbose', action='store_true',
help='output detailed progress info')
args = parser.parse_args()
if args.max_req < 1:
print('*** Usage error: --max_req CONCURRENT must be >= 1')
parser.print_usage()
sys.exit(1)
if args.limit < 1:
print('*** Usage error: --limit N must be >= 1')
parser.print_usage()
sys.exit(1)
args.server = args.server.upper()
if args.server not in SERVERS:
print('*** Usage error: --server LABEL must be one of',
server_options)
parser.print_usage()
sys.exit(1)
try:
cc_list = expand_cc_args(args.every, args.all, args.cc, args.limit)
except ValueError as exc:
print(exc.args[0])
parser.print_usage()
sys.exit(1)
if not cc_list:
cc_list = sorted(POP20_CC)
return args, cc_list
def main(download_many, default_concur_req, max_concur_req):
args, cc_list = process_args(default_concur_req)
actual_req = min(args.max_req, max_concur_req, len(cc_list))
initial_report(cc_list, actual_req, args.server)
base_url = SERVERS[args.server]
t0 = time.time()
counter = download_many(cc_list, base_url, args.verbose, actual_req)
assert sum(counter.values()) == len(cc_list), \
'some downloads are unaccounted for'
final_report(cc_list, counter, t0)
| gpl-3.0 | 4,697,844,028,749,351,000 | 34.442953 | 75 | 0.573849 | false |
sandygiuliani/drug_repo2 | config.py | 1 | 7501 | # Copyright 2014 Sandra Giuliani
# config.py
# Configuration file for drug_repo.py
############################################################################
### PERSONAL INFO
############################################################################
# what is your name?
your_name = "Sandra"
# what is your email? (for NCBI Expasy and T-coffee)
your_email = "[email protected]"
############################################################################
############################################################################
### PIPELINE STEPS
############################################################################
# define which steps of the pipeline you wish to run
# integer between 0 and 10
# eg steps = 6 will run all steps up to (and including) 6
steps = 8
# step of the pipeline that requires modeller
# only change this if you have shuffled the main function!
modeller_step = 10
############################################################################
############################################################################
### TAXONOMY
############################################################################
# define list of taxa ids you are interested in
# eg ['SCHMA', 'SCHHA', 'SCHJA']
taxa = ['SCHMA']
# to identify a specific species, look up species name in speclist.txt
# to find the mnemonic code
# e.g. Schistosoma
# SCHMA (S. Mansoni), SCHHA (S. haematobium), SCHJA (S. japonicum)
# e.g Trypanosoma
# TRYB2 = Trypanosoma brucei brucei (strain 927/4 GUTat10.1)
# TRYB9 = Trypanosoma brucei gambiense (strain MHOM/CI/86/DAL972)
# TRYBB = Trypanosoma brucei brucei
# TRYBG = Trypanosoma brucei gambiense
# TRYBR = Trypanosoma brucei rhodesiense
# TRYCC = Trypanosoma cruzi (strain CL Brener)
# TRYCI = Trypanosoma congolense (strain IL3000)
# TRYCO = Trypanosoma congolense
# TRYCR = Trypanosoma cruzi
# TRYEQ = Trypanosoma equiperdum
# TRYEV = Trypanosoma evansi
# e.g. plasmodium (there are many others!)
# PLAF1 E 57265: N=Plasmodium falciparum (isolate 311)
############################################################################
############################################################################
### PATHS
############################################################################
# path to archindex binary
# old path "./../archSchema/bin/archindex" still valid on mac
# new path on linux machine "./../Arch/archindex"
archindex_path = "./../archSchema/bin/archindex"
# absolute path to SMSD directory (where SMSD.sh is)
# 1.5.1 - first version I have used (from sourceforge)
# 1.6 - version sent by Asad that should handle multiple sdf and keep ids
# "/home/sandra/SMSD1.6" on linux
# /Users/sandragiuliani/SMSD1.6 on mac
smsd_path = "/Users/sandragiuliani/SMSD1.6"
############################################################################
############################################################################
### SETS AND FILTERING SETTINGS
############################################################################
# which sets to analyse
# e.g. ['A'] -> just ChEMBL
# e.g. ['A', 'B'] -> both ChEMBL and DrugBank
sets = ['A']
dataset_dic = {'A': 'ChEMBL', 'B': 'DrugBank'}
# chembl filter settings
# define list of clinical phases you are interested in
# (only applies to ChEMBL set)
# eg. '4', '3', '' (empty string for the unknown phase)
chembl_phases = ['4']
# define molecule types you are interested in
chembl_mol_type = ['Synthetic Small Molecule']
############################################################################
############################################################################
### CLUSTERING SETTINGS
############################################################################
# define similarity threshold for clustering
# e.g. 0.9
sim_threshold = 0.9
############################################################################
############################################################################
### REPOSITIONING CANDIDATE
############################################################################
# repositioning candidate to be examined
# put CHEMBL or DB ID eg 'CHEMBL98', 'DB03292'
repo_candidate = 'CHEMBL973'
# target number, for selecting which drug target to align to the potential
# parasite targets.
# 0 is the first one (could be the only one), 1 the second one...
repo_target_no = 0
############################################################################
############################################################################
### HOMOLOGY MODEL
############################################################################
# number of homology models to make
model_no = 10
# alignment file - has to be in PIR format
model_align = '1d3h_schma.ali'
# template name - PDB ID of the crystal structure
model_xray = '1d3h'
# sequence to model name - arbitrary name, but has to match in the .ali file
model_seq = 'schma'
############################################################################
############################################################################
### INPUT_FILES
############################################################################
# input files (refer to README for source)
# drug file from ChEMBL ('Browse drugs') 'chembl_drugs.txt'
# number of drugs should be 10406
# FOR TESTING, use 'chembl_drugs_test.txt'
chembl_input = 'chembl_drugs.txt'
# define CHEMBL_TARGETS as the target file from ChEMBL ('Browse drug targets')
# number of drugs associated with targets should be 2007
chembl_targets = 'chembl_drugtargets.txt'
# define CHEMBL_UNIPROT as the chemblID/uniprot mapping file
chembl_uniprot = 'chembl_uniprot_mapping.txt'
# define DRUGBANK_INPUT as the DrugBank Drug Target Identifiers
# either: all_target_ids_all.csv (all drugs, 4,026 entries),
# or: small_molecule_target_ids_all.csv (small molecule drugs, 3,899 entries)
# FOR TESTING, use 'small_molecule_target_ids_all_test.csv'
drugbank_input = 'small_molecule_target_ids_all.csv'
# define sdf file with drugbank drugs (contains smiles)
drugbank_sdf = 'all.sdf'
# uniprot to pdb csv mapping file
# if necessary, uniprot_pdb.tsv (tsv version) can be retrieved
uniprot_pdb = "uniprot_pdb.csv"
# pdb to lig mapping file
pdb_lig = "lig_pairs.lst"
# pointless het groups
pointless_het = "pointless_het.csv"
# chemical component smiles dictionary
cc_smi = "Components-smiles-oe.smi"
# location of the species codes to species names mapping file
spec_list = 'speclist.txt'
# pdb to pfam residue mapping
pdb_to_pfam = 'pdb_pfam_mapping.txt'
# uniprot to cath residue mapping
uniprot_cath = 'arch_schema_cath.tsv'
############################################################################
############################################################################
### OUTPUT_FILES
############################################################################
# define names of output files, they will be overwritten every time
# if you do not want that to happen, add a timestamp to the file names
# 'dr' stands for drug repositioning
# other temporary files will also be named dr_*
# log
log_name = 'dr_log.log'
#tcoffee log
t_coffee = 'dr_tcoffee.log'
# chembl similarity scores written to file
chembl_clust_sim_scores = 'dr_chembl_clust_sim_scores.txt'
# chembl cluster to be imported in excel
# clustered drugs with info from chembl! (no mapping info)
chembl_cluster = 'dr_chembl_clust_excel.txt'
############################################################################ | mit | 6,203,926,662,967,334,000 | 32.19469 | 78 | 0.503933 | false |
blackshirt/simpletrain | models.py | 1 | 1532 | '''
Database Models
Models defined here
===================
1. User
2. Employee
3. Workplace
4. Training
5. Letter
6. LearningAssignment
'''
import os
from datetime import date, datetime
from pony.orm import Database, sql_debug
from pony.orm import Required, Optional, Set
__all__ = ['db', 'User', 'Employee', 'Workplace', 'Training', 'Letter',
'LearningAssignment']
db = Database()
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
support_dir = os.path.join(dir_path, "support")
class Base(db.Entity):
name = Required(str)
class User(Base):
email = Optional(str)
password = Optional(str)
dob = Optional(date)
pob = Optional(str)
utype = Optional(str, default='guest')
class Employee(User):
nip = Required(str)
class Workplace(Base):
address = Optional(str)
city = Optional(str)
class SendRecvMixin(User, Workplace):
pass
class Sender(SendRecvMixin):
letter = Set("Letter")
class Receiver(SendRecvMixin):
letter = Set("Letter")
class Letter(db.Entity):
about = Required(str)
date = Optional(date)
number = Optional(str)
sender = Optional(Sender)
receiver = Set(Receiver)
inputed_at = Optional(datetime, default=datetime.now)
last_updated = Optional(datetime, default=datetime.now)
class Training(db.Entity):
title = Required(str)
class LearningAssignment(db.Entity):
about = Required(str)
sql_debug(True)
db.bind("sqlite", ":memory:", create_db=True)
db.generate_mapping(create_tables=True)
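# Illustrative usage sketch (not part of the original module). Entities must
# be created and queried inside a db_session; the names and values below are
# made up for the example.
if __name__ == '__main__':
    from pony.orm import db_session, select
    with db_session:
        Workplace(name='Head Office', city='Springfield')
        Employee(name='Alice', nip='198700112233')
    with db_session:
        # print the names of all stored employees
        print(select(e.name for e in Employee)[:])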
| mit | -565,255,332,013,163,500 | 17.238095 | 71 | 0.673629 | false |
belatrix/BackendAllStars | activities/urls.py | 1 | 1168 | from .views import send_message_all, send_message_to, send_message_location, send_message_event
from .views import get_activities, get_messages, get_messages_from, get_messages_from_all, get_notifications
from django.conf.urls import url
urlpatterns = [
url(r'^send/message/all/$', send_message_all, name='send_message_all'),
url(r'^send/message/to/(?P<employee_username>\w+)/$', send_message_to, name='send_message_to'),
url(r'^send/message/location/(?P<location_id>\d+)/$', send_message_location, name='send_message_location'),
url(r'^send/message/event/(?P<event_id>\d+)/$', send_message_event, name='send_message_event'),
url(r'^get/activity/employee/(?P<employee_id>\d+)/all/$', get_activities, name='get_activities'),
url(r'^get/message/employee/(?P<employee_id>\d+)/all/$', get_messages, name='get_messages'),
url(r'^get/message/from/employee/all/$', get_messages_from_all, name='get_messages_from_all'),
url(r'^get/message/from/employee/(?P<employee_id>\d+)/all/$', get_messages_from, name='get_messages_from'),
url(r'^get/notification/employee/(?P<employee_id>\d+)/all/$', get_notifications, name='get_notifications'),
]
| apache-2.0 | 4,920,638,106,448,543,000 | 72 | 111 | 0.696062 | false |
hansonrobotics/chatbot | test/test_chatbot.py | 1 | 7163 | #!/usr/bin/env python
import unittest
import os
import sys
import time
import subprocess
import signal
RCFILE = os.environ.get('COVERAGE_RCFILE', '.coveragerc')
from chatbot.client import Client
class ChatbotTest(unittest.TestCase):
@classmethod
def setUpClass(self):
self.port = '8002'
self.cwd = os.path.abspath(os.path.dirname(__file__))
os.environ['HR_CHARACTER_PATH'] = os.path.join(self.cwd, 'characters')
server = os.path.join(self.cwd, '../scripts/run_server.py')
cmd = ['coverage', 'run', '--rcfile', RCFILE, server, '-p', self.port]
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
@classmethod
def tearDownClass(self):
if self.proc:
while self.proc.poll() is None:
try:
                    print "Shutting down"
self.proc.send_signal(signal.SIGINT)
except OSError as ex:
print ex
time.sleep(0.2)
try:
os.killpg(self.proc.pid, 2)
except OSError as ex:
print "Killed"
def test_pyaiml(self):
script = os.path.join(self.cwd, os.path.sep.join(
['..', 'src', 'chatbot', 'aiml', 'Kernel.py']))
cmd = 'python ' + script
ret = os.system(cmd)
self.assertTrue(ret == 0)
def test_prologue(self):
cli = Client('AAAAB3NzaC', username='test_client', port=self.port, test=True)
while not cli.ping():
time.sleep(1)
cli.do_conn('localhost:' + self.port)
cli.do_select('generic')
response = cli.ask('hello sophia')
self.assertTrue(response.get('text') == 'Hi there from generic')
cli.do_select('sophia')
response = cli.ask('hello sophia')
self.assertTrue(response.get('text') == 'Hi there from sophia')
def test_two_clients(self):
cli = Client('AAAAB3NzaC', botname='generic', port=self.port, test=True)
cli2 = Client('AAAAB3NzaC', botname='sophia', port=self.port, test=True)
while not cli.ping():
time.sleep(1)
cli.do_conn('localhost:' + self.port)
cli2.do_conn('localhost:' + self.port)
response = cli.ask('hello sophia')
self.assertTrue(response.get('text') == 'Hi there from generic')
response = cli2.ask('hello sophia')
self.assertTrue(response.get('text') == 'Hi there from sophia')
def test_session_manager(self):
from chatbot.server.session import SessionManager
session_manager = SessionManager(False)
sid = session_manager.start_session(user='test', key='key', test=True)
session = session_manager.get_session(sid)
self.assertIsNotNone(session)
self.assertIsNone(session.cache.last_time)
self.assertTrue(session.add("hi", "hi there"))
self.assertIsNotNone(session.cache.last_time)
session_manager.reset_session(sid)
self.assertIsNotNone(session)
self.assertIsNone(session.cache.last_time)
session_manager.remove_session(sid)
self.assertFalse(session.add("hi", "hi there"))
session = session_manager.get_session(sid)
self.assertIsNone(session)
def test_session_manager_auto(self):
import chatbot.server.config
chatbot.server.config.SESSION_REMOVE_TIMEOUT = 2
from chatbot.server.session import SessionManager
reload(chatbot.server.session)
session_manager = SessionManager(True)
sid = session_manager.start_session(user='test', key='key', test=True)
session = session_manager.get_session(sid)
self.assertIsNotNone(session)
self.assertIsNone(session.cache.last_time)
time.sleep(0.5)
# session cache should have record
self.assertTrue(session.add("hi", "hi there"))
self.assertIsNotNone(session.cache.last_time)
# session should not be removed
time.sleep(1)
self.assertIsNotNone(session.cache.last_time)
# session should be removed
time.sleep(1.5)
self.assertFalse(session.add("hi", "hi there"))
session = session_manager.get_session(sid)
self.assertIsNone(session)
def test_chat_agent(self):
from chatbot.server.chatbot_agent import session_manager, ask
sid = session_manager.start_session(user='test', key='key', test=True)
sess = session_manager.get_session(sid)
sess.sdata.botname = 'sophia'
sess.sdata.user = 'test'
response, ret = ask('what is your name', 'en', sid)
self.assertEqual(ret, 0)
response, ret = ask('', 'en', sid)
self.assertEqual(ret, 4)
response, ret = ask(None, 'en', sid)
self.assertEqual(ret, 4)
def test_loader(self):
from chatbot.server.loader import load_characters
character_path = os.environ.get('HR_CHARACTER_PATH')
characters = load_characters(character_path)
names = [character.name for character in characters]
self.assertEqual(names, ['dummy', 'generic', 'sophia'])
def test_polarity(self):
from chatbot.polarity import Polarity
p = Polarity()
p.load_sentiment_csv(os.path.join(
self.cwd, '../scripts/aiml/senticnet3.props.csv'))
self.assertTrue(p.get_polarity("The dish is yucky") < 0)
self.assertTrue(p.get_polarity("The weather is nice") > 0)
def test_util(self):
import chatbot.utils as utils
text = '''My mind is built using Hanson Robotics' character engine, a simulated humanlike brain that runs inside a personal computer. Within this framework, Hanson has modelled Phil's personality and emotions, allowing you to talk with Phil through me, using speech recognition, natural language understanding, and computer vision such as face recognition, and animation of the robotic muscles in my face.'''
text2 = utils.shorten(text, 123)
self.assertTrue(len(text2) <= 123)
text2 = utils.shorten(text, 0)
self.assertTrue(len(text2) > 0)
self.assertTrue(utils.str_cleanup(' . ') == '')
self.assertTrue(utils.str_cleanup(' .ss ') == 'ss')
self.assertTrue(utils.str_cleanup(' s.ss ') == 's.ss')
self.assertTrue(utils.str_cleanup(None) is None)
self.assertTrue(utils.check_online('google.com'))
self.assertTrue(utils.check_online('google.com', 80))
self.assertTrue(not utils.check_online('google.com', 81))
def test_words2num(self):
from chatbot.words2num import words2num
self.assertTrue(words2num('one hundred trillion and twelve') == 100000000000012)
self.assertTrue(words2num('one hundred trillion twelve hundred and 21') == 100000000001221)
self.assertTrue(words2num("one hundred and seventy nine") == 179)
self.assertTrue(words2num("thirteen hundred") == 1300)
self.assertTrue(words2num("nine thousand two hundred and ninety seven") == 9297)
self.assertTrue(words2num(None) is None)
self.assertTrue(words2num("zero") == 0)
if __name__ == '__main__':
unittest.main()
| mit | 1,190,334,528,366,401,300 | 39.931429 | 416 | 0.629066 | false |
jktubs/ctSESAM-python-memorizing | Crypter.py | 1 | 1892 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Encryption and decryption module.
"""
from Crypto.Cipher import AES
from hashlib import pbkdf2_hmac
class Crypter:
"""
Encrypt and decrypt with AES in CBC mode with PKCS7 padding. The constructor calculates the key from the given
password and salt with PBKDF2 using HMAC with SHA512 and 32768 iterations.
"""
def __init__(self, salt, password):
self.iv = b'\xb5\x4f\xcf\xb0\x88\x09\x55\xe5\xbf\x79\xaf\x37\x71\x1c\x28\xb6'
self.key = pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 32768)[:32]
@staticmethod
def add_pkcs7_padding(data):
"""
Adds PKCS7 padding so it can be divided into full blocks of 16 bytes.
:param bytes data: data without padding
:return: padded data
:rtype: bytes
"""
length = 16 - (len(data) % 16)
data += bytes([length])*length
return data
def encrypt(self, data):
"""
Encrypts with AES in CBC mode with PKCS7 padding.
:param bytes data: data for encryption
:return: encrypted data
:rtype: bytes
"""
aes_object = AES.new(self.key, AES.MODE_CBC, self.iv)
return aes_object.encrypt(self.add_pkcs7_padding(data))
@staticmethod
def remove_pkcs7_padding(data):
"""
Removes the PKCS7 padding.
:param bytes data: padded data
:return: data without padding
:rtype: bytes
"""
return data[:-data[-1]]
def decrypt(self, encrypted_data):
"""
Decrypts with AES in CBC mode with PKCS7 padding.
:param bytes encrypted_data: encrypted data
:return: decrypted data
:rtype: bytes
"""
aes_object = AES.new(self.key, AES.MODE_CBC, self.iv)
return self.remove_pkcs7_padding(aes_object.decrypt(encrypted_data))
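# Illustrative usage sketch (not part of the original module); the salt and
# password below are made-up placeholders - any bytes salt and str password
# work.
if __name__ == '__main__':
    crypter = Crypter(b'example-salt', 'correct horse battery staple')
    ciphertext = crypter.encrypt(b'attack at dawn')
    assert crypter.decrypt(ciphertext) == b'attack at dawn'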
| gpl-3.0 | -7,594,151,041,902,287,000 | 28.5625 | 114 | 0.609937 | false |
stormi/tsunami | src/secondaires/familier/fiche.py | 1 | 5370 | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the FicheFamilier class, detailed below."""
from abstraits.obase import BaseObj
from secondaires.familier.script import ScriptFiche
class FicheFamilier(BaseObj):
    """Class representing a familiar's record sheet.
    A familiar is defined by a sheet, just as NPCs are defined by
    prototypes or sailors are defined by sailor sheets. The sheet
    holds general information about the NPC (each NPC prototype has
    a sheet). For example, a horse (NPC prototype 'cheval') can have
    a familiar sheet ('cheval'). That sheet declares that the horse
    is a herbivore. Every horse created from the 'cheval' prototype
    ('cheval_0', 'cheval_1', 'cheval_2', ...) can then be a familiar
    that uses this sheet.
    """
enregistrer = True
nom_scripting = "familier"
type_achat = "familier"
    def __init__(self, cle):
        """Sheet constructor."""
BaseObj.__init__(self)
self.cle = cle
self.regime = "herbivore"
self.monture = False
self.sorties_verticales = False
self.aller_interieur = False
self.stats_progres = ["force", "agilite", "robustesse", "intelligence"]
self.difficulte_apprivoisement = 10
self.harnachements = []
self.m_valeur = 50
self.script = ScriptFiche(self)
def __getnewargs__(self):
return ("", )
def __repr__(self):
return "<FicheFamilier {}>".format(self.cle)
def __str__(self):
return self.cle
@property
    def prototype(self):
        """Return the associated NPC prototype."""
return importeur.pnj.prototypes.get(self.cle)
@property
    def familiers(self):
        """Return the list of familiars created from this sheet.
        WARNING: this method returns the familiars, not the NPCs.
        An NPC can be created from the NPC prototype without it
        appearing in this list.
        """
familiers = list(importeur.familier.familiers.values())
familiers = [f for f in familiers if f.cle == self.cle]
return familiers
@property
def str_harnachements(self):
return ", ".join(sorted(self.harnachements))
@property
def str_stats_progres(self):
return ", ".join(sorted(self.stats_progres))
@property
def nom_achat(self):
return self.prototype and self.prototype.nom_singulier or "inconnu"
@property
def nom_singulier(self):
return self.prototype and self.prototype.nom_singulier or "inconnu"
@property
def nom_pluriel(self):
return self.prototype and self.prototype.nom_pluriel or "inconnus"
    def get_nom(self, nombre=1):
        """Return the full name according to the given quantity."""
if nombre == 0:
raise ValueError("Nombre invalide")
elif nombre == 1:
return self.nom_singulier
else:
return str(nombre) + " " + self.nom_pluriel
    def acheter(self, quantite, magasin, transaction):
        """Buy familiars in the specified quantity."""
salle = magasin.parent
acheteur = transaction.initiateur
i = 0
while i < quantite:
i += 1
pnj = importeur.pnj.creer_PNJ(self.prototype)
pnj.salle = salle
familier = importeur.familier.creer_familier(pnj)
familier.maitre = acheteur
familier.trouver_nom()
salle.envoyer("{} arrive.", pnj)
    def regarder(self, personnage):
        """The character looks at the familiar before buying it."""
desc = self.prototype.description.regarder(personnage,
elt=self.prototype)
return desc
| bsd-3-clause | -3,066,334,919,988,521,500 | 35.868966 | 79 | 0.674336 | false |
fernandog/Medusa | lib/tmdbsimple/tv.py | 1 | 18419 | # -*- coding: utf-8 -*-
"""
tmdbsimple.tv
~~~~~~~~~~~~~
This module implements the TV, TV Seasons, TV Episodes, and Networks
functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2018 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class TV(TMDB):
"""
TV functionality.
See: https://developers.themoviedb.org/3/tv
"""
BASE_PATH = 'tv'
URLS = {
'info': '/{id}',
'alternative_titles': '/{id}/alternative_titles',
'content_ratings': '/{id}/content_ratings',
'credits': '/{id}/credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'rating': '/{id}/rating',
'similar': '/{id}/similar',
'recommendations': '/{id}/recommendations',
'translations': '/{id}/translations',
'videos': '/{id}/videos',
'changes': '/{id}/changes',
'latest': '/latest',
'on_the_air': '/on_the_air',
'airing_today': '/airing_today',
'top_rated': '/top_rated',
'popular': '/popular',
}
def __init__(self, id=0):
super(TV, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the primary information about a TV series by id.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def alternative_titles(self, **kwargs):
"""
Get the alternative titles for a specific tv id.
Args:
language: (optional) ISO 3166-1 code.
append_to_response: (optional) Comma separated, any tv method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('alternative_titles')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def content_ratings(self, **kwargs):
"""
Get the content ratings for a TV Series.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('content_ratings')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the cast & crew information about a TV series. Just like the
website, we pull this information from the last season of the series.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids that we have stored for a TV series.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (posters and backdrops) for a TV series.
Args:
language: (optional) ISO 639 code.
include_image_language: (optional) Comma separated, a valid
                ISO 639-1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def rating(self, **kwargs):
"""
This method lets users rate a TV show. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
def similar(self, **kwargs):
"""
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def recommendations(self, **kwargs):
"""
Get the recommendations for TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def translations(self, **kwargs):
"""
Get the list of translations that exist for a TV series. These
translations cascade down to the episode level.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('translations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV series (trailers, opening
credits, etc...).
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a specific series id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The
maximum number of days that can be returned in a single request is 14.
The language is present on fields that are translatable.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the most newly created TV show. This is a live response
and will continuously change.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def on_the_air(self, **kwargs):
"""
Get the list of TV shows that are currently on the air. This query
looks for any TV show that has an episode with an air date in the
next 7 days.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('on_the_air')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def airing_today(self, **kwargs):
"""
Get the list of TV shows that air today. Without a specified timezone,
this query defaults to EST (Eastern Time UTC-05:00).
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
timezone: (optional) Valid value from the list of timezones.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('airing_today')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def top_rated(self, **kwargs):
"""
Get the list of top rated TV shows. By default, this list will only
include TV shows that have 2 or more votes. This list refreshes every
day.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('top_rated')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular TV shows. This list refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class TV_Seasons(TMDB):
"""
TV Seasons functionality.
See: https://developers.themoviedb.org/3/tv-seasons
"""
BASE_PATH = 'tv/{series_id}/season/{season_number}'
URLS = {
'info': '',
'credits': '/credits',
'external_ids': '/external_ids',
'images': '/images',
'videos': '/videos',
}
def __init__(self, series_id, season_number):
super(TV_Seasons, self).__init__()
self.series_id = series_id
self.season_number = season_number
def info(self, **kwargs):
"""
Get the primary information about a TV season by its season number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the cast & crew credits for a TV season by season number.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (posters) that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
include_image_language: (optional) Comma separated, a valid
                ISO 639-1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV season (trailers, teasers,
etc...).
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class TV_Episodes(TMDB):
"""
TV Episodes functionality.
See: https://developers.themoviedb.org/3/tv-episodes
"""
BASE_PATH = 'tv/{series_id}/season/{season_number}/episode/{episode_number}'
URLS = {
'info': '',
'credits': '/credits',
'external_ids': '/external_ids',
'images': '/images',
'rating': '/rating',
'videos': '/videos',
}
def __init__(self, series_id, season_number, episode_number):
super(TV_Episodes, self).__init__()
self.series_id = series_id
self.season_number = season_number
self.episode_number = episode_number
def info(self, **kwargs):
"""
Get the primary information about a TV episode by combination of a
season and episode number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the TV episode credits by combination of season and episode number.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a TV episode by combination of a season and
episode number.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path(
'external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (episode stills) for a TV episode by combination of a
season and episode number. Since episode stills don't have a language,
this call will always return all images.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def rating(self, **kwargs):
"""
This method lets users rate a TV episode. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV episode (teasers, clips,
etc...).
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Networks(TMDB):
"""
Networks functionality.
See: https://developers.themoviedb.org/3/networks
"""
BASE_PATH = 'network'
URLS = {
'info': '/{id}',
}
def __init__(self, id):
super(Networks, self).__init__()
self.id = id
def info(self, **kwargs):
"""
This method is used to retrieve the basic information about a TV
        network. You can use this ID to search for TV shows with the discover method.
At this time we don't have much but this will be fleshed out over time.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
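# Illustrative usage sketch (not part of the original module). It assumes the
# package-level API_KEY attribute used elsewhere in tmdbsimple and a valid
# TMDb key; the series id 1396 is only an example.
#
#   import tmdbsimple as tmdb
#   tmdb.API_KEY = 'YOUR_API_KEY'
#   show = tmdb.TV(1396)
#   info = show.info()                        # also populates show.name, etc.
#   season = tmdb.TV_Seasons(1396, 1).info()
#   episode_credits = tmdb.TV_Episodes(1396, 1, 1).credits()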
| gpl-3.0 | -6,584,255,332,790,810,000 | 28.708065 | 80 | 0.569846 | false |
oskgeek/liftpass | config.py | 1 | 2493 | import sys
import os
# ------------------------------------------------------------------------------
# User Authentication - Liftpass is a single user application (for now)
# ------------------------------------------------------------------------------
UserKey = b'd759214482924d10ac159b794e9424e7'
UserSecret = b'4bf5d2c68e444ecab4d50adf8590544c'
# ------------------------------------------------------------------------------
# Paths
# ------------------------------------------------------------------------------
BasePath = os.path.abspath('./')
sys.path.append(BasePath)
# Where application data is stored
DataPath = os.path.join(BasePath,'data/')
# ------------------------------------------------------------------------------
# Analytics Storage - Where SDK updates are stored before being processed
# ------------------------------------------------------------------------------
AnalyticsStorage = {
'engine': 'core.storage.filesystem',
'path': os.path.join(DataPath, 'analytics/'),
'update': 600,
}
# ------------------------------------------------------------------------------
# Content Database - Where application content and settings are stored
# ------------------------------------------------------------------------------
ContentDatabase = {
'address': 'sqlite:///%s/content.db'%DataPath,
'debug': False
}
# ------------------------------------------------------------------------------
# Pricing Engine - Where data for prices are stored
# ------------------------------------------------------------------------------
PricingStorage = {
'engine': 'core.storage.filesystem',
'path': os.path.join(DataPath, 'prices/')
}
# ------------------------------------------------------------------------------
# Monitoring - records server activity and performance (not yet supported)
# ------------------------------------------------------------------------------
MonitorEngine = None
# ------------------------------------------------------------------------------
# Debug Terminal - caches user updates for debuging
# ------------------------------------------------------------------------------
DashboardTerminal = {
'engine': 'core.terminal.local',
'path': os.path.join(DataPath, 'terminal/')
}
# ------------------------------------------------------------------------------
# API Interface Service
# ------------------------------------------------------------------------------
APIServer = {
'address': '127.0.0.1',
'port': 9090,
'cors': True
}
| apache-2.0 | -6,309,304,441,272,448,000 | 37.953125 | 80 | 0.349779 | false |
Paulloz/godot | glsl_builders.py | 3 | 7347 | """Functions used to generate source files during build time
All such functions are invoked in a subprocess on Windows to prevent build flakiness.
"""
from platform_methods import subprocess_main
class RDHeaderStruct:
def __init__(self):
self.vertex_lines = []
self.fragment_lines = []
self.compute_lines = []
self.vertex_included_files = []
self.fragment_included_files = []
self.compute_included_files = []
self.reading = ""
self.line_offset = 0
self.vertex_offset = 0
self.fragment_offset = 0
self.compute_offset = 0
def include_file_in_rd_header(filename, header_data, depth):
fs = open(filename, "r")
line = fs.readline()
while line:
if line.find("#[vertex]") != -1:
header_data.reading = "vertex"
line = fs.readline()
header_data.line_offset += 1
header_data.vertex_offset = header_data.line_offset
continue
if line.find("#[fragment]") != -1:
header_data.reading = "fragment"
line = fs.readline()
header_data.line_offset += 1
header_data.fragment_offset = header_data.line_offset
continue
if line.find("#[compute]") != -1:
header_data.reading = "compute"
line = fs.readline()
header_data.line_offset += 1
header_data.compute_offset = header_data.line_offset
continue
while line.find("#include ") != -1:
includeline = line.replace("#include ", "").strip()[1:-1]
import os.path
included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
if not included_file in header_data.vertex_included_files and header_data.reading == "vertex":
header_data.vertex_included_files += [included_file]
if include_file_in_rd_header(included_file, header_data, depth + 1) is None:
                    print("Error in file '" + filename + "': #include " + includeline + " could not be found!")
elif not included_file in header_data.fragment_included_files and header_data.reading == "fragment":
header_data.fragment_included_files += [included_file]
if include_file_in_rd_header(included_file, header_data, depth + 1) is None:
                    print("Error in file '" + filename + "': #include " + includeline + " could not be found!")
elif not included_file in header_data.compute_included_files and header_data.reading == "compute":
header_data.compute_included_files += [included_file]
if include_file_in_rd_header(included_file, header_data, depth + 1) is None:
                    print("Error in file '" + filename + "': #include " + includeline + " could not be found!")
line = fs.readline()
line = line.replace("\r", "")
line = line.replace("\n", "")
if header_data.reading == "vertex":
header_data.vertex_lines += [line]
if header_data.reading == "fragment":
header_data.fragment_lines += [line]
if header_data.reading == "compute":
header_data.compute_lines += [line]
line = fs.readline()
header_data.line_offset += 1
fs.close()
return header_data
def build_rd_header(filename):
header_data = RDHeaderStruct()
include_file_in_rd_header(filename, header_data, 0)
out_file = filename + ".gen.h"
fd = open(out_file, "w")
enum_constants = []
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file
out_file_base = out_file_base[out_file_base.rfind("/") + 1 :]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1 :]
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + "_RD\n")
fd.write("#define " + out_file_ifdef + "_RD\n")
out_file_class = out_file_base.replace(".glsl.gen.h", "").title().replace("_", "").replace(".", "") + "ShaderRD"
fd.write("\n")
fd.write('#include "servers/rendering/rasterizer_rd/shader_rd.h"\n\n')
fd.write("class " + out_file_class + " : public ShaderRD {\n\n")
fd.write("public:\n\n")
fd.write("\t" + out_file_class + "() {\n\n")
if len(header_data.compute_lines):
fd.write("\t\tstatic const char _compute_code[] = {\n")
for x in header_data.compute_lines:
for c in x:
fd.write(str(ord(c)) + ",")
fd.write(str(ord("\n")) + ",")
fd.write("\t\t0};\n\n")
fd.write('\t\tsetup(nullptr, nullptr, _compute_code, "' + out_file_class + '");\n')
fd.write("\t}\n")
else:
fd.write("\t\tstatic const char _vertex_code[] = {\n")
for x in header_data.vertex_lines:
for c in x:
fd.write(str(ord(c)) + ",")
fd.write(str(ord("\n")) + ",")
fd.write("\t\t0};\n\n")
fd.write("\t\tstatic const char _fragment_code[]={\n")
for x in header_data.fragment_lines:
for c in x:
fd.write(str(ord(c)) + ",")
fd.write(str(ord("\n")) + ",")
fd.write("\t\t0};\n\n")
fd.write('\t\tsetup(_vertex_code, _fragment_code, nullptr, "' + out_file_class + '");\n')
fd.write("\t}\n")
fd.write("};\n\n")
fd.write("#endif\n")
fd.close()
def build_rd_headers(target, source, env):
for x in source:
build_rd_header(str(x))
class RAWHeaderStruct:
def __init__(self):
self.code = ""
def include_file_in_raw_header(filename, header_data, depth):
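    """Append the contents of a raw shader file to header_data.code, recursively expanding #include directives."""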
fs = open(filename, "r")
line = fs.readline()
text = ""
while line:
while line.find("#include ") != -1:
includeline = line.replace("#include ", "").strip()[1:-1]
import os.path
included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
include_file_in_raw_header(included_file, header_data, depth + 1)
line = fs.readline()
header_data.code += line
line = fs.readline()
fs.close()
def build_raw_header(filename):
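    """Generate a <filename>.gen.h header embedding the raw shader source as a char array."""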
header_data = RAWHeaderStruct()
include_file_in_raw_header(filename, header_data, 0)
out_file = filename + ".gen.h"
fd = open(out_file, "w")
enum_constants = []
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file.replace(".glsl.gen.h", "_shader_glsl")
out_file_base = out_file_base[out_file_base.rfind("/") + 1 :]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1 :]
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + "_RAW_H\n")
fd.write("#define " + out_file_ifdef + "_RAW_H\n")
fd.write("\n")
fd.write("static const char " + out_file_base + "[] = {\n")
for c in header_data.code:
fd.write(str(ord(c)) + ",")
fd.write("\t\t0};\n\n")
fd.write("#endif\n")
fd.close()
def build_raw_headers(target, source, env):
for x in source:
build_raw_header(str(x))
if __name__ == "__main__":
subprocess_main(globals())
| mit | -8,014,658,679,955,484,000 | 31.653333 | 116 | 0.559412 | false |
jgmanzanas/CMNT_004_15 | project-addons/vies_timestamp/fiscal_position.py | 1 | 1201 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea Servicios Tecnológicos <www.comunitea.com>
# $Omar Castiñeira Saavedra <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class AccountFiscalPosition(models.Model):
_inherit = "account.fiscal.position"
require_vies_validation = fields.Boolean("Require vies validation")
| agpl-3.0 | 8,867,831,660,095,343,000 | 40.344828 | 78 | 0.633862 | false |
lionaneesh/sugarlabs-calculate | toolbars.py | 1 | 14126 | # -*- coding: utf-8 -*-
# toolbars.py, see CalcActivity.py for info
import pygtk
pygtk.require('2.0')
import gtk
from mathlib import MathLib
from sugar.graphics.palette import Palette
from sugar.graphics.menuitem import MenuItem
from sugar.graphics.toolbutton import ToolButton
from sugar.graphics.toggletoolbutton import ToggleToolButton
from sugar.graphics.style import GRID_CELL_SIZE
import logging
_logger = logging.getLogger('calc-activity')
from gettext import gettext as _
def _icon_exists(name):
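    """Return True if an icon with the given name exists in the default GTK icon theme."""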
if name == '':
return False
theme = gtk.icon_theme_get_default()
info = theme.lookup_icon(name, 0, 0)
if info:
return True
return False
class IconToolButton(ToolButton):
def __init__(self, icon_name, text, cb, help_cb=None, alt_html=''):
ToolButton.__init__(self)
if _icon_exists(icon_name):
self.set_icon(icon_name)
else:
if alt_html == '':
alt_html = icon_name
label = gtk.Label()
label.set_markup(alt_html)
label.show()
self.set_label_widget(label)
self.create_palette(text, help_cb)
self.connect('clicked', cb)
def create_palette(self, text, help_cb):
p = Palette(text)
if help_cb is not None:
item = MenuItem(_('Help'), 'action-help')
item.connect('activate', help_cb)
item.show()
p.menu.append(item)
self.set_palette(p)
class IconToggleToolButton(ToggleToolButton):
def __init__(self, items, cb, desc):
ToggleToolButton.__init__(self)
self.items = items
if 'icon' in items[0] and _icon_exists(items[0]['icon']):
self.set_named_icon(items[0]['icon'])
elif 'html' in items[0]:
self.set_label(items[0]['html'])
# self.set_tooltip(items[0][1])
self.set_tooltip(desc)
self.selected = 0
self.connect('clicked', self.toggle_button)
self.callback = cb
def toggle_button(self, w):
self.selected = (self.selected + 1) % len(self.items)
but = self.items[self.selected]
if 'icon' in but and _icon_exists(but['icon']):
self.set_named_icon(but['icon'])
elif 'html' in but:
_logger.info('Setting html: %s', but['html'])
self.set_label(but['html'])
# self.set_tooltip(but[1])
if self.callback is not None:
if 'html' in but:
self.callback(but['html'])
else:
self.callback(but)
class TextToggleToolButton(gtk.ToggleToolButton):
def __init__(self, items, cb, desc, index=False):
gtk.ToggleToolButton.__init__(self)
self.items = items
self.set_label(items[0])
self.selected = 0
self.connect('clicked', self.toggle_button)
self.callback = cb
self.index = index
self.set_tooltip_text(desc)
def toggle_button(self, w):
self.selected = (self.selected + 1) % len(self.items)
but = self.items[self.selected]
self.set_label(but)
if self.callback is not None:
if self.index:
self.callback(self.selected)
else:
self.callback(but)
class LineSeparator(gtk.SeparatorToolItem):
def __init__(self):
gtk.SeparatorToolItem.__init__(self)
self.set_draw(True)
class EditToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
copy_tool = ToolButton('edit-copy')
copy_tool.set_tooltip(_('Copy'))
copy_tool.set_accelerator(_('<ctrl>c'))
copy_tool.connect('clicked', lambda x: calc.text_copy())
self.insert(copy_tool, -1)
menu_item = MenuItem(_('Cut'))
try:
menu_item.set_accelerator(_('<ctrl>x'))
except AttributeError:
pass
menu_item.connect('activate', lambda x: calc.text_cut())
menu_item.show()
copy_tool.get_palette().menu.append(menu_item)
self.insert(IconToolButton('edit-paste', _('Paste'),
lambda x: calc.text_paste(),
alt_html='Paste'), -1)
self.show_all()
class AlgebraToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('algebra-square', _('Square'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '**2'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(square)'),
alt_html='x<sup>2</sup>'), -1)
self.insert(IconToolButton('algebra-sqrt', _('Square root'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'sqrt'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(sqrt)'),
alt_html='√x'), -1)
self.insert(IconToolButton('algebra-xinv', _('Inverse'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '**-1'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(inv)'),
alt_html='x<sup>-1</sup>'), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('algebra-exp', _('e to the power x'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'exp'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(exp)'),
alt_html='e<sup>x</sup>'), -1)
self.insert(IconToolButton('algebra-xpowy', _('x to the power y'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'pow'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(pow)'),
alt_html='x<sup>y</sup>'), -1)
self.insert(IconToolButton('algebra-ln', _('Natural logarithm'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'ln'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(ln)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('algebra-fac', _('Factorial'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'fac'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(fac)')), -1)
self.show_all()
class TrigonometryToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('trigonometry-sin', _('Sine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'sin'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(sin)')), -1)
self.insert(IconToolButton('trigonometry-cos', _('Cosine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'cos'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(cos)')), -1)
self.insert(IconToolButton('trigonometry-tan', _('Tangent'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'tan'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(tan)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('trigonometry-asin', _('Arc sine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'asin'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(asin)')), -1)
self.insert(IconToolButton('trigonometry-acos', _('Arc cosine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'acos'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(acos)')), -1)
self.insert(IconToolButton('trigonometry-atan', _('Arc tangent'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'atan'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(atan)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('trigonometry-sinh', _('Hyperbolic sine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'sinh'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(sinh)')), -1)
self.insert(IconToolButton('trigonometry-cosh', _('Hyperbolic cosine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'cosh'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(cosh)')), -1)
self.insert(IconToolButton('trigonometry-tanh', _('Hyperbolic tangent'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'tanh'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(tanh)')), -1)
self.show_all()
class BooleanToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('boolean-and', _('Logical and'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '&'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(And)')), -1)
self.insert(IconToolButton('boolean-or', _('Logical or'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '|'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(Or)')), -1)
# self.insert(IconToolButton('boolean-xor', _('Logical xor'),
# lambda x: calc.button_pressed(calc.TYPE_OP_POST, '^'),
# lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(xor)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('boolean-eq', _('Equals'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '==')), -1)
self.insert(IconToolButton('boolean-neq', _('Not equals'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '!=')), -1)
self.show_all()
class MiscToolbar(gtk.Toolbar):
def __init__(self, calc, target_toolbar=None):
self._target_toolbar = target_toolbar
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('constants-pi', _('Pi'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'pi'),
alt_html='π'), -1)
self.insert(IconToolButton('constants-e', _('e'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'e')), -1)
self.insert(IconToolButton('constants-eulersconstant', _('γ'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, '0.577215664901533')), -1)
self.insert(IconToolButton('constants-goldenratio', _('φ'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, '1.618033988749895')), -1)
self._line_separator1 = LineSeparator()
self._line_separator2 = LineSeparator()
self._plot_button = IconToolButton('plot', _('Plot'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'plot'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(plot)'))
el = [
{'icon': 'format-deg', 'desc': _('Degrees'), 'html': 'deg'},
{'icon': 'format-rad', 'desc': _('Radians'), 'html': 'rad'},
]
self._angle_button = IconToggleToolButton(
el,
lambda x: self.update_angle_type(x, calc),
_('Degrees / Radians'))
self.update_angle_type('deg', calc)
el = [
{'icon': 'format-sci', 'html': 'sci'},
{'icon': 'format-exp', 'html': 'exp'},
]
self._format_button = IconToggleToolButton(
el,
lambda x: self.update_format_type(x, calc),
_('Exponent / Scientific notation'))
el = [
{'icon': 'digits-9', 'html': '9'},
{'icon': 'digits-12', 'html': '12'},
{'icon': 'digits-15', 'html': '15'},
{'icon': 'digits-6', 'html': '6'},
]
self._digits_button = IconToggleToolButton(
el,
lambda x: self.update_digits(x, calc),
_('Number of shown digits'))
el = [
{'icon': 'base-10', 'html': '10'},
{'icon': 'base-2', 'html': '2'},
{'icon': 'base-16', 'html': '16'},
{'icon': 'base-8', 'html': '8'}
]
self._base_button = IconToggleToolButton(
el,
lambda x: self.update_int_base(x, calc),
_('Integer formatting base'))
self.update_layout()
self.show_all()
def update_layout(self):
if gtk.gdk.screen_width() < 14 * GRID_CELL_SIZE or \
self._target_toolbar is None:
target_toolbar = self
if self._target_toolbar is not None:
self._remove_buttons(self._target_toolbar)
else:
target_toolbar = self._target_toolbar
self._remove_buttons(self)
target_toolbar.insert(self._line_separator1, -1)
target_toolbar.insert(self._plot_button, -1)
target_toolbar.insert(self._line_separator2, -1)
target_toolbar.insert(self._angle_button, -1)
target_toolbar.insert(self._format_button, -1)
target_toolbar.insert(self._digits_button, -1)
target_toolbar.insert(self._base_button, -1)
def _remove_buttons(self, toolbar):
for item in [self._plot_button, self._line_separator1,
self._line_separator2, self._angle_button,
self._format_button, self._digits_button,
self._base_button]:
toolbar.remove(item)
def update_angle_type(self, text, calc):
var = calc.parser.get_var('angle_scaling')
if var is None:
_logger.warning('Variable angle_scaling not defined.')
return
if text == 'deg':
var.value = MathLib.ANGLE_DEG
elif text == 'rad':
var.value = MathLib.ANGLE_RAD
_logger.debug('Angle scaling: %s', var.value)
def update_format_type(self, text, calc):
if text == 'exp':
calc.ml.set_format_type(MathLib.FORMAT_EXPONENT)
elif text == 'sci':
calc.ml.set_format_type(MathLib.FORMAT_SCIENTIFIC)
_logger.debug('Format type: %s', calc.ml.format_type)
def update_digits(self, text, calc):
calc.ml.set_digit_limit(int(text))
_logger.debug('Digit limit: %s', calc.ml.digit_limit)
def update_int_base(self, text, calc):
calc.ml.set_integer_base(int(text))
_logger.debug('Integer base: %s', calc.ml.integer_base)
| gpl-2.0 | -3,756,317,717,864,053,000 | 35.677922 | 84 | 0.569436 | false |
babelsberg/babelsberg-r | tests/constraints/test_midpoint.py | 1 | 1511 | import py
from ..base import BaseTopazTest
E = 0.00000000000001
class TestConstraintVariableObject(BaseTopazTest):
def execute(self, space, code, *libs):
return [space.execute("""
require "%s"
%s
""" % (lib, code)) for lib in libs]
def test_midpoint(self, space):
w_res = space.execute("""
require "libcassowary"
res = []
class Point
def x; @x; end
def y; @y; end
def + q
Point.new(x+q.x,y+q.y)
end
def * n
Point.new(x*n, y*n)
end
def / n
Point.new(x/n, y/n)
end
def == o
o.x == self.x && o.y == self.y
end
def initialize(x, y)
@x = x
@y = y
end
end
class MidpointLine
attr_reader :end1, :end2, :midpoint
def initialize(pt1, pt2)
@end1 = pt1
@end2 = pt2
@midpoint = Point.new(0,0)
always { (end1 + end2) == (midpoint*2) }
end
def length
@end2.x - @end1.x
end
end
p1 = Point.new(0,10)
p2 = Point.new(20,30)
m = MidpointLine.new(p1,p2)
return p1.x, p1.y, p2.x, p2.y, m.midpoint.x, m.midpoint.y
""")
res = self.unwrap(space, w_res)
assert (res[0] + res[2]) / 2.0 == res[4]
assert (res[1] + res[3]) / 2.0 == res[5]
| bsd-3-clause | -4,466,466,756,293,176,000 | 21.552239 | 65 | 0.433488 | false |
LawrenceK/fs_monitor | fs_monitor.py | 1 | 5649 | #!/usr/bin/python
# (C)opyright L.P.Klyne 2013
sw_topleft = 13
sw_topright = 7
sw_bottomleft = 12
sw_bottomright = 11
led_topleft = 22
led_topright = 18
led_bottomleft = 15
led_bottomright = 16
import logging
import os
import os.path
import time
import subprocess
import RPi.GPIO as GPIO
_log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
#logging.basicConfig(filename='example.log',level=logging.DEBUG)
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# place holder for adding command line
class Config:
pass
config = Config()
config.rsync_script = "rsync_a_b.sh"
class led:
def __init__(self, channel):
self.channel = channel
GPIO.setup(self.channel, GPIO.OUT, initial=GPIO.HIGH)
self.pwm = GPIO.PWM(self.channel, 1)
self.pwm.start(100.0)
def flash(self, dc):
_log.debug("flash led %s", self.channel)
self.pwm.ChangeDutyCycle(dc)
def on(self):
_log.debug("led on %s", self.channel)
self.pwm.ChangeDutyCycle(0.0)
def off(self):
_log.debug("led off %s", self.channel)
self.pwm.ChangeDutyCycle(100.0)
def is_on(self):
return GPIO.input(self.channel)
class switch:
def __init__(self, channel):
self.channel = channel
self.actions = [] # callable taking self
GPIO.setup(self.channel, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def add_action(self, action):
_log.debug("switch %s add action %s", self.channel, action)
self.actions.append(action)
if len(self.actions) == 1:
GPIO.add_event_detect(
self.channel, GPIO.BOTH,
callback=lambda c: self.edge(), bouncetime=200)
    def remove_action(self, action):
        self.actions.remove(action)
        if len(self.actions) == 0:
            GPIO.remove_event_detect(self.channel)
def edge(self):
if self.is_on():
for a in self.actions:
_log.info("switch trigger %s action %s", self.channel, a)
a(self)
def is_on(self):
return not GPIO.input(self.channel) # pulled up
class disk:
# States:
# NotExist
# ExistUnmounted
# ExistsMounted
def __init__(self, name, mount, led):
self.devicename = name
self.mountpoint = mount
self.managed = False # have we seen the device
self.led = led
def is_mounted(self):
return os.path.ismount(self.mountpoint)
def device_exists(self):
return os.path.exists(self.devicename)
def check_mount(self):
        # The aim here is to mount the disk when it is plugged in, to leave it
        # unmounted when the unmount was initiated by a switch, and to mount it
        # again when the device is unplugged and plugged back in. I tried using
        # udev, but that resulted in the disk mounting too early when plugged in at boot.
if self.device_exists():
if self.managed:
                # it is either already mounted or being unmounted
pass
else:
_log.info("Disk added %s", self.devicename)
if self.is_mounted():
self.led.on()
else:
self.do_mount()
self.managed = True
return True
elif self.managed:
_log.info("Disk removed %s", self.devicename)
self.managed = False
return False
def do_mount(self):
self.led.flash(10)
_log.info("Mounting %s on %s", self.devicename, self.mountpoint)
subprocess.check_call(["mount", self.mountpoint])
self.led.on()
return True
def do_unmount(self):
if self.is_mounted():
self.led.flash(50)
_log.info("Un Mounting %s from %s", self.devicename, self.mountpoint)
subprocess.check_call(["umount", self.mountpoint])
self.led.off()
return True
leds = [
led(led_topleft),
led(led_topright),
led(led_bottomleft),
led(led_bottomright)
]
switches = [
switch(sw_topleft), # Shutdown
switch(sw_topright), # RSync
switch(sw_bottomleft), # unmount sda1/diskA
switch(sw_bottomright) # unmount sdb1/diskB
]
disks = [
disk('/dev/sda1', '/mnt/diskA', leds[2]),
disk('/dev/sdb1', '/mnt/diskB', leds[3]),
]
rsync_p = None
def do_rsync(script):
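    """Run the rsync helper script when both disks are mounted and no sync is already in progress."""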
global rsync_p
if rsync_p is None and disks[0].is_mounted() and disks[1].is_mounted():
scriptfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), script)
_log.info("Rsync %s to/from %s using %s",
os.path.join(disks[1].mountpoint, "*"),
disks[0].mountpoint,
scriptfile)
leds[1].flash(50)
rsync_p = subprocess.Popen(scriptfile, shell=True)
def do_shutdown():
_log.info("Halt fileserver")
subprocess.check_call(["halt"])
def main():
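    """Wire the switches to their actions, then poll for disk hot-plug events and rsync completion."""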
global rsync_p
leds[0].on()
try:
_log.info("Startup fileserver monitor")
switches[0].add_action(lambda s: do_shutdown())
switches[1].add_action(lambda s: do_rsync(config.rsync_script))
switches[2].add_action(lambda s: disks[0].do_unmount())
switches[3].add_action(lambda s: disks[1].do_unmount())
while(True):
time.sleep(2.0)
if rsync_p is None:
if any([d.check_mount() for d in disks]):
do_rsync(config.rsync_script)
elif rsync_p.poll() is not None: # has rsync completed
rsync_p.returncode
rsync_p = None
leds[1].off()
finally:
leds[0].off()
GPIO.cleanup()
main()
| bsd-3-clause | -5,335,742,982,079,361,000 | 26.691176 | 86 | 0.582404 | false |
coxmediagroup/dolphin | dolphin/tests/templatetags.py | 1 | 2824 | import re
from django.contrib.auth.models import User
from django.template import Context, Template
from dolphin.middleware import LocalStoreMiddleware
from dolphin.tests.flipper import BaseTest
class ActiveTagTest(BaseTest):
fixtures = ['dolphin_base_flags.json']
def check_res(self, text, expected):
t = Template(text)
c = Context()
res = t.render(c)
res = ' '.join(re.findall("[a-zA-Z0-9]+", res)) #strip \n and spaces
self.assertEqual(res,
expected)
def test_ifactive_enabled(self):
text = r"""
{% load dolphin_tags %}
{% ifactive "enabled" %}
Test
{% endifactive %}
"""
expected_resp = "Test"
self.check_res(text, expected_resp)
def test_ifactive_disabled(self):
text = r"""
{% load dolphin_tags %}
{% ifactive "testing_disabled" %}
Test2
{% else %}
Test3
{% endifactive %}
"""
expected_resp = "Test3"
self.check_res(text, expected_resp)
def test_ifactive_missing(self):
text = r"""
{% load dolphin_tags %}
{% ifactive "testing_missing" %}
Test4
{% else %}
Test5
{% endifactive %}
"""
expected_resp = "Test5"
self.check_res(text, expected_resp)
class FlagListTest(BaseTest):
fixtures = ['dolphin_users.json', 'dolphin_user_flags.json', 'dolphin_base_flags.json']
def clear(self):
LocalStoreMiddleware.local.clear()
def _fake_request(self):
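        """Build a minimal stand-in for a request object."""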
req = type("Request", (object,), {})()
return req
def test_active_flag_list(self):
text = r"""{% load dolphin_tags %}{% active_flags %}"""
t = Template(text)
c = Context()
res = t.render(c)
self.assertEqual(res,
"enabled")
def test_active_flag_list_user(self):
text = r"""{% load dolphin_tags %}{% active_flags %}"""
req = self._fake_request()
req.user = User.objects.get(username="registered")
c = Context({'request':req})
t = Template(text)
res = t.render(c)
#test a registered user that is a part of the selected_group flag group
res = res.split(',')
res.sort()
expected = ["enabled","registered_only","selected_group"]
self.assertEqual(res, expected)
self.clear()
req.user = User.objects.get(username='staff')
c = Context({'request':req})
t = Template(text)
res = t.render(c)
#test a staff user that is not in the group expected by selected_group flag
res = res.split(',')
res.sort()
expected = ["enabled","registered_only","staff_only"]
self.assertEqual(res, expected)
| mit | -5,243,593,176,856,959,000 | 27.24 | 91 | 0.554178 | false |
jprovaznik/tripleo-common | tripleo_common/tests/test_updates.py | 1 | 3507 | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
import testtools
from tripleo_common import updates
from tripleo_common.tests import base
class UpdateManagerTest(base.TestCase):
def setUp(self):
super(UpdateManagerTest, self).setUp()
self.image = collections.namedtuple('image', ['id'])
def test_start_update(self):
client = mock.MagicMock()
client.stacks.get.return_value = mock.MagicMock(stack_name='stack')
mock_resource = mock.MagicMock(
resource_type='OS::Nova::Server', logical_resource_id='logical_id',
physical_resource_id='physical_id', parent_resource='parent')
client.resources.list.return_value = [mock_resource]
with mock.patch('tripleo_common.updates.open',
mock.mock_open(read_data='template body'),
create=True) as mopen:
updates.UpdateManager(client).start('123', 'template.yaml')
env = {
'resource_registry': {
'resources': {
'deployments': {
'*': {'hooks': 'pre-create'}
}
}
}
}
params = {
'stack_name': 'stack_update',
'template': 'template body',
'environment': {
'resource_registry': {
'resources': {
'deployments': {
'*': {'hooks': 'pre-create'}
}
}
}
},
'parameters': {'servers': '{"logical_id-parent": "physical_id"}'}}
client.stacks.get.assert_called_once_with('123')
client.resources.list.assert_called_once_with('123', 2)
client.stacks.create.assert_called_once_with(**params)
def test_get_status(self):
client = mock.MagicMock()
client.stacks.get.return_value = mock.MagicMock(
status='IN_PROGRESS',
stack_name='stack')
events = [
mock.MagicMock(
event_time='2015-03-25T09:15:04Z',
resource_name='Controller-0',
resource_status='CREATE_IN_PROGRESS',
resource_status_reason='Paused until the hook is cleared'),
mock.MagicMock(
event_time='2015-03-25T09:15:05Z',
resource_name='Controller-1',
resource_status='CREATE_COMPLETE',
resource_status_reason=''),
]
client.resources.get.return_value = mock.MagicMock(
physical_resource_id='resource')
client.events.list.return_value = events
status, resources = updates.UpdateManager(client, '123').get_status()
client.stacks.get.assert_called_once_with('123')
client.events.list.assert_called_once_with('resource')
self.assertEqual('WAITING', status)
| apache-2.0 | 6,384,612,201,289,550,000 | 37.966667 | 79 | 0.57485 | false |
arokem/scipy | scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py | 1 | 12773 | """ Test functions for the sparse.linalg.eigen.lobpcg module
"""
from __future__ import division, print_function, absolute_import
import itertools
import platform
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
import pytest
from numpy import ones, r_, diag
from numpy.random import rand
from scipy.linalg import eig, eigh, toeplitz, orth
from scipy.sparse import spdiags, diags, eye
from scipy.sparse.linalg import eigs, LinearOperator
from scipy.sparse.linalg.eigen.lobpcg import lobpcg
def ElasticRod(n):
"""Build the matrices for the generalized eigenvalue problem of the
fixed-free elastic rod vibration model.
"""
L = 1.0
le = L/n
rho = 7.85e3
S = 1.e-4
E = 2.1e11
mass = rho*S*le/6.
k = E*S/le
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
return A, B
def MikotaPair(n):
"""Build a pair of full diagonal matrices for the generalized eigenvalue
problem. The Mikota pair acts as a nice test since the eigenvalues are the
squares of the integers n, n=1,2,...
"""
x = np.arange(1, n+1)
B = diag(1./x)
y = np.arange(n-1, 0, -1)
z = np.arange(2*n-1, 0, -2)
A = diag(z)-diag(y, -1)-diag(y, 1)
return A, B
def compare_solutions(A, B, m):
"""Check eig vs. lobpcg consistency.
"""
n = A.shape[0]
np.random.seed(0)
V = rand(n, m)
X = orth(V)
eigvals, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False)
eigvals.sort()
w, _ = eig(A, b=B)
w.sort()
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
def test_Small():
A, B = ElasticRod(10)
compare_solutions(A, B, 10)
A, B = MikotaPair(10)
compare_solutions(A, B, 10)
def test_ElasticRod():
A, B = ElasticRod(100)
compare_solutions(A, B, 20)
def test_MikotaPair():
A, B = MikotaPair(100)
compare_solutions(A, B, 20)
def test_regression():
"""Check the eigenvalue of the identity matrix is one.
"""
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
n = 10
X = np.ones((n, 1))
A = np.identity(n)
w, _ = lobpcg(A, X)
assert_allclose(w, [1])
def test_diagonal():
"""Check for diagonal matrices.
"""
# This test was moved from '__main__' in lobpcg.py.
# Coincidentally or not, this is the same eigensystem
# required to reproduce arpack bug
# https://forge.scilab.org/p/arpack-ng/issues/1397/
# even using the same n=100.
np.random.seed(1234)
# The system of interest is of size n x n.
n = 100
# We care about only m eigenpairs.
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A to be the diagonal matrix whose entries are 1..n
# and where B is chosen to be the identity matrix.
vals = np.arange(1, n+1, dtype=float)
A = diags([vals], [0], (n, n))
B = eye(n)
# Let the preconditioner M be the inverse of A.
M = diags([1./vals], [0], (n, n))
# Pick random initial vectors.
X = np.random.rand(n, m)
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors.
m_excluded = 3
Y = np.eye(n, m_excluded)
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False)
assert_allclose(eigvals, np.arange(1+m_excluded, 1+m_excluded+m))
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
"""Check if the eigenvalue residual is small.
"""
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
"""Check the Fiedler vector computation.
"""
# This is not necessarily the recommended way to find the Fiedler vector.
np.random.seed(1234)
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)
D = np.diag(A.sum(axis=1))
L = D - A
# Compute the full eigendecomposition using tricks, e.g.
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
tmp = np.pi * np.arange(n) / n
analytic_w = 2 * (1 - np.cos(tmp))
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
_check_eigen(L, analytic_w, analytic_V)
# Compute the full eigendecomposition using eigh.
eigh_w, eigh_V = eigh(L)
_check_eigen(L, eigh_w, eigh_V)
# Check that the first eigenvalue is near zero and that the rest agree.
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
assert_allclose(eigh_w[1:], analytic_w[1:])
# Check small lobpcg eigenvalues.
X = analytic_V[:, :p]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
# Check large lobpcg eigenvalues.
X = analytic_V[:, -p:]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
# Look for the Fiedler vector using good but not exactly correct guesses.
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
X = np.vstack((np.ones(n), fiedler_guess)).T
lobpcg_w, _ = lobpcg(L, X, largest=False)
# Mathematically, the smaller eigenvalue should be zero
# and the larger should be the algebraic connectivity.
lobpcg_w = np.sort(lobpcg_w)
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
def test_fiedler_small_8():
"""Check the dense workaround path for small matrices.
"""
# This triggers the dense path because 8 < 2*5.
_check_fiedler(8, 2)
def test_fiedler_large_12():
"""Check the dense workaround path avoided for non-small matrices.
"""
# This does not trigger the dense path, because 2*5 <= 12.
_check_fiedler(12, 2)
def test_hermitian():
"""Check complex-value Hermitian cases.
"""
np.random.seed(1234)
sizes = [3, 10, 50]
ks = [1, 3, 10, 50]
gens = [True, False]
for size, k, gen in itertools.product(sizes, ks, gens):
if k > size:
continue
H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
H = 10 * np.eye(size) + H + H.T.conj()
X = np.random.rand(size, k)
if not gen:
B = np.eye(size)
w, v = lobpcg(H, X, maxiter=5000)
w0, _ = eigh(H)
else:
B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
B = 10 * np.eye(size) + B.dot(B.T.conj())
w, v = lobpcg(H, X, B, maxiter=5000, largest=False)
w0, _ = eigh(H, B)
for wx, vx in zip(w, v.T):
# Check eigenvector
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
/ np.linalg.norm(H.dot(vx)),
0, atol=5e-4, rtol=0)
# Compare eigenvalues
j = np.argmin(abs(w0 - wx))
assert_allclose(wx, w0[j], rtol=1e-4)
# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigs_consistency(n, atol):
"""Check eigs vs. lobpcg consistency.
"""
vals = np.arange(1, n+1, dtype=np.float64)
A = spdiags(vals, 0, n, n)
np.random.seed(345678)
X = np.random.rand(n, 2)
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
vals, _ = eigs(A, k=2)
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
def test_verbosity(tmpdir):
"""Check that nonzero verbosity level code runs.
"""
A, B = ElasticRod(100)
n = A.shape[0]
m = 20
np.random.seed(0)
V = rand(n, m)
X = orth(V)
_, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False,
verbosityLevel=9)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_tolerance_float32():
"""Check lobpcg for attainable tolerance in float32.
"""
np.random.seed(1234)
n = 50
m = 3
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.randn(n, m)
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-9, maxiter=50, verbosityLevel=0)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-5)
def test_random_initial_float32():
"""Check lobpcg in float32 for specific initial.
"""
np.random.seed(3)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.rand(n, m)
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2)
def test_maxit_None():
"""Check lobpcg if maxit=None runs 20 iterations (the default)
by checking the size of the iteration history output, which should
be the number of iterations plus 2 (initial and final values).
"""
np.random.seed(1566950023)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.randn(n, m)
X = X.astype(np.float32)
_, _, l_h = lobpcg(A, X, tol=1e-8, maxiter=20, retLambdaHistory=True)
assert_allclose(np.shape(l_h)[0], 20+2)
@pytest.mark.slow
def test_diagonal_data_types():
"""Check lobpcg for diagonal matrices for all matrix types.
"""
np.random.seed(1234)
n = 50
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A and B to be diagonal.
vals = np.arange(1, n + 1)
list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
for s_f in list_sparse_format:
As64 = diags([vals * vals], [0], (n, n), format=s_f)
As32 = As64.astype(np.float32)
Af64 = As64.toarray()
Af32 = Af64.astype(np.float32)
listA = [Af64, As64, Af32, As32]
Bs64 = diags([vals], [0], (n, n), format=s_f)
Bf64 = Bs64.toarray()
listB = [Bf64, Bs64]
# Define the preconditioner function as LinearOperator.
Ms64 = diags([1./vals], [0], (n, n), format=s_f)
def Ms64precond(x):
return Ms64 @ x
Ms64precondLO = LinearOperator(matvec=Ms64precond,
matmat=Ms64precond,
shape=(n, n), dtype=float)
Mf64 = Ms64.toarray()
def Mf64precond(x):
return Mf64 @ x
Mf64precondLO = LinearOperator(matvec=Mf64precond,
matmat=Mf64precond,
shape=(n, n), dtype=float)
Ms32 = Ms64.astype(np.float32)
def Ms32precond(x):
return Ms32 @ x
Ms32precondLO = LinearOperator(matvec=Ms32precond,
matmat=Ms32precond,
shape=(n, n), dtype=np.float32)
Mf32 = Ms32.toarray()
def Mf32precond(x):
return Mf32 @ x
Mf32precondLO = LinearOperator(matvec=Mf32precond,
matmat=Mf32precond,
shape=(n, n), dtype=np.float32)
listM = [None, Ms64precondLO, Mf64precondLO,
Ms32precondLO, Mf32precondLO]
# Setup matrix of the initial approximation to the eigenvectors
# (cannot be sparse array).
Xf64 = np.random.rand(n, m)
Xf32 = Xf64.astype(np.float32)
listX = [Xf64, Xf32]
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors (cannot be sparse array).
m_excluded = 3
Yf64 = np.eye(n, m_excluded, dtype=float)
Yf32 = np.eye(n, m_excluded, dtype=np.float32)
listY = [Yf64, Yf32]
for A, B, M, X, Y in itertools.product(listA, listB, listM, listX,
listY):
eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
maxiter=100, largest=False)
assert_allclose(eigvals,
np.arange(1 + m_excluded, 1 + m_excluded + m))
| bsd-3-clause | -3,813,003,098,075,375,000 | 31.092965 | 82 | 0.581148 | false |
docusign/docusign-python-client | docusign_esign/models/id_check_configuration.py | 1 | 4802 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IdCheckConfiguration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'auth_steps': 'list[IdCheckSecurityStep]',
'is_default': 'str',
'name': 'str'
}
attribute_map = {
'auth_steps': 'authSteps',
'is_default': 'isDefault',
'name': 'name'
}
def __init__(self, auth_steps=None, is_default=None, name=None): # noqa: E501
"""IdCheckConfiguration - a model defined in Swagger""" # noqa: E501
self._auth_steps = None
self._is_default = None
self._name = None
self.discriminator = None
if auth_steps is not None:
self.auth_steps = auth_steps
if is_default is not None:
self.is_default = is_default
if name is not None:
self.name = name
@property
def auth_steps(self):
"""Gets the auth_steps of this IdCheckConfiguration. # noqa: E501
# noqa: E501
:return: The auth_steps of this IdCheckConfiguration. # noqa: E501
:rtype: list[IdCheckSecurityStep]
"""
return self._auth_steps
@auth_steps.setter
def auth_steps(self, auth_steps):
"""Sets the auth_steps of this IdCheckConfiguration.
# noqa: E501
:param auth_steps: The auth_steps of this IdCheckConfiguration. # noqa: E501
:type: list[IdCheckSecurityStep]
"""
self._auth_steps = auth_steps
@property
def is_default(self):
"""Gets the is_default of this IdCheckConfiguration. # noqa: E501
# noqa: E501
:return: The is_default of this IdCheckConfiguration. # noqa: E501
:rtype: str
"""
return self._is_default
@is_default.setter
def is_default(self, is_default):
"""Sets the is_default of this IdCheckConfiguration.
# noqa: E501
:param is_default: The is_default of this IdCheckConfiguration. # noqa: E501
:type: str
"""
self._is_default = is_default
@property
def name(self):
"""Gets the name of this IdCheckConfiguration. # noqa: E501
# noqa: E501
:return: The name of this IdCheckConfiguration. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IdCheckConfiguration.
# noqa: E501
:param name: The name of this IdCheckConfiguration. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(IdCheckConfiguration, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IdCheckConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | 8,359,876,280,570,722,000 | 26.757225 | 140 | 0.557268 | false |
Undo1/Torch | script.py | 1 | 1147 | from BeautifulSoup import BeautifulSoup
import mysql.connector
import config #Where we keep our passwords and stuff
import tldextract
import itertools
cnx = mysql.connector.connect(user=config.MySQLUsername(), password=config.MySQLPassword(), host=config.MySQLHost(), database=config.MySQLDatabase())
cursor = cnx.cursor()
query = ("SELECT Body, Score FROM Posts WHERE PostTypeId=2")
cursor.execute(query)
sites = []
for (Body, Score) in cursor:
linksInAnswer = []
soup = BeautifulSoup(Body)
for link in soup.findAll('a'):
extract = tldextract.extract(link.get('href'))
# print extract
if len(extract.subdomain) > 0:
site = extract.subdomain + '.' + extract.domain + '.' + extract.suffix
else:
site = extract.domain + '.' + extract.suffix
site = link.get('href')
linksInAnswer.append(site)
linksInAnswer = set(linksInAnswer)
sites.extend(linksInAnswer)
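# Group identical links together and report any link that appears in more than three answers.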
groupedsites = [list(g) for k, g in itertools.groupby(sorted(sites))]
groupedsites = sorted(groupedsites, key=len, reverse=True)
for sitegroup in groupedsites:
if len(sitegroup) > 3: print str(len(sitegroup)) + " x " + sitegroup[0]
cursor.close()
cnx.close() | mit | -2,881,982,992,902,040,000 | 27 | 149 | 0.72973 | false |
mistercrunch/panoramix | superset/databases/api.py | 1 | 34612 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any, Dict, List, Optional
from zipfile import ZipFile
from flask import g, request, Response, send_file
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from marshmallow import ValidationError
from sqlalchemy.exc import NoSuchTableError, OperationalError, SQLAlchemyError
from superset import app, event_logger
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.databases.commands.create import CreateDatabaseCommand
from superset.databases.commands.delete import DeleteDatabaseCommand
from superset.databases.commands.exceptions import (
DatabaseConnectionFailedError,
DatabaseCreateFailedError,
DatabaseDeleteDatasetsExistFailedError,
DatabaseDeleteFailedError,
DatabaseInvalidError,
DatabaseNotFoundError,
DatabaseUpdateFailedError,
)
from superset.databases.commands.export import ExportDatabasesCommand
from superset.databases.commands.importers.dispatcher import ImportDatabasesCommand
from superset.databases.commands.test_connection import TestConnectionDatabaseCommand
from superset.databases.commands.update import UpdateDatabaseCommand
from superset.databases.commands.validate import ValidateDatabaseParametersCommand
from superset.databases.dao import DatabaseDAO
from superset.databases.decorators import check_datasource_access
from superset.databases.filters import DatabaseFilter
from superset.databases.schemas import (
database_schemas_query_schema,
DatabaseFunctionNamesResponse,
DatabasePostSchema,
DatabasePutSchema,
DatabaseRelatedObjectsResponse,
DatabaseTestConnectionSchema,
DatabaseValidateParametersSchema,
get_export_ids_schema,
SchemasResponseSchema,
SelectStarResponseSchema,
TableMetadataResponseSchema,
)
from superset.databases.utils import get_table_metadata
from superset.db_engine_specs import get_available_engine_specs
from superset.exceptions import InvalidPayloadFormatError, InvalidPayloadSchemaError
from superset.extensions import security_manager
from superset.models.core import Database
from superset.typing import FlaskResponse
from superset.utils.core import error_msg_from_exception
from superset.views.base_api import BaseSupersetModelRestApi, statsd_metrics
logger = logging.getLogger(__name__)
class DatabaseRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(Database)
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
"table_metadata",
"select_star",
"schemas",
"test_connection",
"related_objects",
"function_names",
"available",
"validate_parameters",
}
resource_name = "database"
class_permission_name = "Database"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
allow_browser_login = True
base_filters = [["id", DatabaseFilter, lambda: []]]
show_columns = [
"id",
"database_name",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_csv_upload",
"configuration_method",
"allow_ctas",
"allow_cvas",
"allow_dml",
"backend",
"force_ctas_schema",
"allow_multi_schema_metadata_fetch",
"impersonate_user",
"encrypted_extra",
"extra",
"parameters",
"server_cert",
"sqlalchemy_uri",
]
list_columns = [
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_cost_estimate",
"allows_subquery",
"allows_virtual_table_explore",
"backend",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"created_by.last_name",
"database_name",
"explore_database_id",
"expose_in_sqllab",
"force_ctas_schema",
"id",
]
add_columns = [
"database_name",
"sqlalchemy_uri",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"configuration_method",
"force_ctas_schema",
"impersonate_user",
"allow_multi_schema_metadata_fetch",
"extra",
"encrypted_extra",
"server_cert",
]
edit_columns = add_columns
list_select_columns = list_columns + ["extra", "sqlalchemy_uri", "password"]
order_columns = [
"allow_csv_upload",
"allow_dml",
"allow_run_async",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"database_name",
"expose_in_sqllab",
]
# Removes the local limit for the page size
max_page_size = -1
add_model_schema = DatabasePostSchema()
edit_model_schema = DatabasePutSchema()
apispec_parameter_schemas = {
"database_schemas_query_schema": database_schemas_query_schema,
"get_export_ids_schema": get_export_ids_schema,
}
openapi_spec_tag = "Database"
openapi_spec_component_schemas = (
DatabaseFunctionNamesResponse,
DatabaseRelatedObjectsResponse,
DatabaseTestConnectionSchema,
DatabaseValidateParametersSchema,
TableMetadataResponseSchema,
SelectStarResponseSchema,
SchemasResponseSchema,
)
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
log_to_statsd=False,
)
def post(self) -> Response:
"""Creates a new Database
---
post:
description: >-
Create a new Database.
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Database added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
302:
description: Redirects to the current digest
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateDatabaseCommand(g.user, item).run()
# Return censored version for sqlalchemy URI
item["sqlalchemy_uri"] = new_model.sqlalchemy_uri
return self.response(201, id=new_model.id, result=item)
except DatabaseInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatabaseConnectionFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseCreateFailedError as ex:
logger.error(
"Error creating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
log_to_statsd=False,
)
def put( # pylint: disable=too-many-return-statements, arguments-differ
self, pk: int
) -> Response:
"""Changes a Database
---
put:
description: >-
Changes a Database.
parameters:
- in: path
schema:
type: integer
name: pk
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Database changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
changed_model = UpdateDatabaseCommand(g.user, pk, item).run()
# Return censored version for sqlalchemy URI
item["sqlalchemy_uri"] = changed_model.sqlalchemy_uri
return self.response(200, id=changed_model.id, result=item)
except DatabaseNotFoundError:
return self.response_404()
except DatabaseInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatabaseConnectionFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseUpdateFailedError as ex:
logger.error(
"Error updating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".delete",
log_to_statsd=False,
)
def delete(self, pk: int) -> Response: # pylint: disable=arguments-differ
"""Deletes a Database
---
delete:
description: >-
Deletes a Database.
parameters:
- in: path
schema:
type: integer
name: pk
responses:
200:
description: Database deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteDatabaseCommand(g.user, pk).run()
return self.response(200, message="OK")
except DatabaseNotFoundError:
return self.response_404()
except DatabaseDeleteDatasetsExistFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseDeleteFailedError as ex:
logger.error(
"Error deleting model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>/schemas/")
@protect()
@safe
@rison(database_schemas_query_schema)
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".schemas",
log_to_statsd=False,
)
def schemas(self, pk: int, **kwargs: Any) -> FlaskResponse:
"""Get all schemas from a database
---
get:
description: Get all schemas from a database
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/database_schemas_query_schema'
responses:
200:
description: A List of all schemas from the database
content:
application/json:
schema:
$ref: "#/components/schemas/SchemasResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = self.datamodel.get(pk, self._base_filters)
if not database:
return self.response_404()
try:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=kwargs["rison"].get("force", False),
)
schemas = security_manager.get_schemas_accessible_by_user(database, schemas)
return self.response(200, result=schemas)
except OperationalError:
return self.response(
500, message="There was an error connecting to the database"
)
@expose("/<int:pk>/table/<table_name>/<schema_name>/", methods=["GET"])
@protect()
@check_datasource_access
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".table_metadata",
log_to_statsd=False,
)
def table_metadata(
self, database: Database, table_name: str, schema_name: str
) -> FlaskResponse:
"""Table schema info
---
get:
description: Get database table metadata
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: path
schema:
type: string
name: table_name
description: Table name
- in: path
schema:
type: string
name: schema_name
description: Table schema
responses:
200:
description: Table metadata information
content:
application/json:
schema:
$ref: "#/components/schemas/TableMetadataResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self.incr_stats("init", self.table_metadata.__name__)
try:
table_info = get_table_metadata(database, table_name, schema_name)
except SQLAlchemyError as ex:
self.incr_stats("error", self.table_metadata.__name__)
return self.response_422(error_msg_from_exception(ex))
self.incr_stats("success", self.table_metadata.__name__)
return self.response(200, **table_info)
@expose("/<int:pk>/select_star/<table_name>/", methods=["GET"])
@expose("/<int:pk>/select_star/<table_name>/<schema_name>/", methods=["GET"])
@protect()
@check_datasource_access
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.select_star",
log_to_statsd=False,
)
def select_star(
self, database: Database, table_name: str, schema_name: Optional[str] = None
) -> FlaskResponse:
"""Table schema info
---
get:
description: Get database select star for table
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: path
schema:
type: string
name: table_name
description: Table name
- in: path
schema:
type: string
name: schema_name
description: Table schema
responses:
200:
description: SQL statement for a select star for table
content:
application/json:
schema:
$ref: "#/components/schemas/SelectStarResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self.incr_stats("init", self.select_star.__name__)
try:
result = database.select_star(
table_name, schema_name, latest_partition=True, show_cols=True
)
except NoSuchTableError:
self.incr_stats("error", self.select_star.__name__)
return self.response(404, message="Table not found on the database")
self.incr_stats("success", self.select_star.__name__)
return self.response(200, result=result)
@expose("/test_connection", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".test_connection",
log_to_statsd=False,
)
def test_connection( # pylint: disable=too-many-return-statements
self,
) -> FlaskResponse:
"""Tests a database connection
---
post:
description: >-
Tests a database connection
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseTestConnectionSchema"
responses:
200:
description: Database Test Connection
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = DatabaseTestConnectionSchema().load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
TestConnectionDatabaseCommand(g.user, item).run()
return self.response(200, message="OK")
@expose("/<int:pk>/related_objects/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".related_objects",
log_to_statsd=False,
)
def related_objects(self, pk: int) -> Response:
"""Get charts and dashboards count associated to a database
---
get:
description:
Get charts and dashboards count associated to a database
parameters:
- in: path
name: pk
schema:
type: integer
responses:
          200:
            description: Query result
            content:
              application/json:
                schema:
                  $ref: "#/components/schemas/DatabaseRelatedObjectsResponse"
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = DatabaseDAO.find_by_id(pk)
if not database:
return self.response_404()
data = DatabaseDAO.get_related_objects(pk)
charts = [
{
"id": chart.id,
"slice_name": chart.slice_name,
"viz_type": chart.viz_type,
}
for chart in data["charts"]
]
dashboards = [
{
"id": dashboard.id,
"json_metadata": dashboard.json_metadata,
"slug": dashboard.slug,
"title": dashboard.dashboard_title,
}
for dashboard in data["dashboards"]
]
return self.response(
200,
charts={"count": len(charts), "result": charts},
dashboards={"count": len(dashboards), "result": dashboards},
)
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
)
def export(self, **kwargs: Any) -> Response:
"""Export database(s) with associated datasets
---
get:
description: Download database(s) and associated dataset(s) as a zip file
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_export_ids_schema'
responses:
200:
description: A zip file with database(s) and dataset(s) as YAML
content:
application/zip:
schema:
type: string
format: binary
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
requested_ids = kwargs["rison"]
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"database_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
try:
for file_name, file_content in ExportDatabasesCommand(
requested_ids
).run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
except DatabaseNotFoundError:
return self.response_404()
buf.seek(0)
return send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
def import_(self) -> Response:
"""Import database(s) with associated datasets
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP)
type: string
format: binary
passwords:
description: JSON map of passwords for each file
type: string
overwrite:
description: overwrite existing databases?
type: boolean
responses:
200:
description: Database import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportDatabasesCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
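    # Illustrative sketch (not part of the original source): the import endpoint above
    # expects a multipart/form-data POST. Assuming the API is mounted under
    # /api/v1/database/ (the resource route is defined elsewhere), a client call could
    # look roughly like this:
    #
    #   curl -X POST "http://localhost:8088/api/v1/database/import/" \
    #        -H "Authorization: Bearer <token>" \
    #        -F "formData=@databases_export.zip" \
    #        -F "passwords={\"databases/MyDatabase.yaml\": \"secret\"}" \
    #        -F "overwrite=true"
    #
    # The host, port, token and file names here are assumptions for illustration only.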
@expose("/<int:pk>/function_names/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".function_names",
log_to_statsd=False,
)
def function_names(self, pk: int) -> Response:
"""Get function names supported by a database
---
get:
description:
Get function names supported by a database
parameters:
- in: path
name: pk
schema:
type: integer
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseFunctionNamesResponse"
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = DatabaseDAO.find_by_id(pk)
if not database:
return self.response_404()
return self.response(200, function_names=database.function_names,)
@expose("/available/", methods=["GET"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".available",
log_to_statsd=False,
)
def available(self) -> Response:
"""Return names of databases currently available
---
get:
description:
Get names of databases currently available
responses:
200:
description: Database names
content:
application/json:
schema:
type: array
items:
type: object
properties:
name:
description: Name of the database
type: string
engine:
description: Name of the SQLAlchemy engine
type: string
available_drivers:
description: Installed drivers for the engine
type: array
items:
type: string
default_driver:
description: Default driver for the engine
type: string
preferred:
description: Is the database preferred?
type: boolean
sqlalchemy_uri_placeholder:
description: Example placeholder for the SQLAlchemy URI
type: string
parameters:
description: JSON schema defining the needed parameters
type: object
400:
$ref: '#/components/responses/400'
500:
$ref: '#/components/responses/500'
"""
preferred_databases: List[str] = app.config.get("PREFERRED_DATABASES", [])
available_databases = []
for engine_spec, drivers in get_available_engine_specs().items():
payload: Dict[str, Any] = {
"name": engine_spec.engine_name,
"engine": engine_spec.engine,
"available_drivers": sorted(drivers),
"preferred": engine_spec.engine_name in preferred_databases,
}
if hasattr(engine_spec, "default_driver"):
payload["default_driver"] = engine_spec.default_driver # type: ignore
# show configuration parameters for DBs that support it
if (
hasattr(engine_spec, "parameters_json_schema")
and hasattr(engine_spec, "sqlalchemy_uri_placeholder")
and getattr(engine_spec, "default_driver") in drivers
):
payload[
"parameters"
] = engine_spec.parameters_json_schema() # type: ignore
payload[
"sqlalchemy_uri_placeholder"
] = engine_spec.sqlalchemy_uri_placeholder # type: ignore
available_databases.append(payload)
# sort preferred first
response = sorted(
(payload for payload in available_databases if payload["preferred"]),
key=lambda payload: preferred_databases.index(payload["name"]),
)
# add others
response.extend(
sorted(
(
payload
for payload in available_databases
if not payload["preferred"]
),
key=lambda payload: payload["name"],
)
)
return self.response(200, databases=response)
@expose("/validate_parameters", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".validate_parameters",
log_to_statsd=False,
)
def validate_parameters( # pylint: disable=too-many-return-statements
self,
) -> FlaskResponse:
"""validates database connection parameters
---
post:
description: >-
Validates parameters used to connect to a database
requestBody:
description: DB-specific parameters
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseValidateParametersSchema"
responses:
200:
description: Database Test Connection
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
raise InvalidPayloadFormatError("Request is not JSON")
try:
payload = DatabaseValidateParametersSchema().load(request.json)
except ValidationError as error:
raise InvalidPayloadSchemaError(error)
command = ValidateDatabaseParametersCommand(g.user, payload)
command.run()
return self.response(200, message="OK")
| apache-2.0 | -2,846,362,746,387,505,000 | 33.405567 | 88 | 0.53649 | false |
jonathf/chaospy | chaospy/distributions/collection/wald.py | 1 | 2480 | """Wald distribution."""
import numpy
from scipy import special
import chaospy
from ..baseclass import SimpleDistribution, ShiftScaleDistribution
class wald(SimpleDistribution):
"""Wald distribution."""
def __init__(self, mu):
super(wald, self).__init__(dict(mu=mu))
def _pdf(self, x, mu):
out = numpy.zeros(x.shape)
indices = x > 0
out[indices] = 1.0/numpy.sqrt(2*numpy.pi*x[indices])
out[indices] *= numpy.exp(-(1-mu*x[indices])**2.0 / (2*x[indices]*mu**2.0))
return out
def _cdf(self, x, mu):
trm1 = 1./mu-x
trm2 = 1./mu+x
isqx = numpy.full_like(x, numpy.inf)
indices = x > 0
isqx[indices] = 1./numpy.sqrt(x[indices])
out = 1.-special.ndtr(isqx*trm1)
out -= numpy.exp(2.0/mu)*special.ndtr(-isqx*trm2)
out = numpy.where(x == numpy.inf, 1, out)
out = numpy.where(x == -numpy.inf, 0, out)
return out
def _lower(self, mu):
return 0.
def _upper(self, mu):
qloc = numpy.repeat(1-1e-12, mu.size)
out = chaospy.approximate_inverse(
distribution=self,
idx=0,
qloc=qloc,
parameters=dict(mu=mu),
bounds=(0., 60+numpy.e**(1./(mu+0.1))),
tolerance=1e-15,
)
return out
class Wald(ShiftScaleDistribution):
"""
Wald distribution.
Reciprocal inverse Gaussian distribution.
Args:
mu (float, Distribution):
Mean of the normal distribution
scale (float, Distribution):
Scaling parameter
shift (float, Distribution):
Location parameter
Examples:
>>> distribution = chaospy.Wald(0.5)
>>> distribution
Wald(0.5)
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([ 0. , 1.416, 2.099, 2.94 , 4.287, 54.701])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0. , 0.297, 0.275, 0.2 , 0.105, 0. ])
>>> distribution.sample(4).round(3)
array([0.61 , 1.401, 1.274, 2.115])
"""
def __init__(self, mu=1, scale=1, shift=0):
super(Wald, self).__init__(
dist=wald(mu),
scale=scale,
shift=shift,
repr_args=[mu],
)
| mit | -2,990,161,795,545,608,000 | 26.865169 | 83 | 0.522177 | false |
crchemist/scioncc | src/ion/util/preload.py | 1 | 24501 | #!/usr/bin/env python
"""Utility to bulk load resources into the system, e.g. for initial preload"""
__author__ = 'Michael Meisinger'
import yaml
import re
import os
from pyon.core import MSG_HEADER_ACTOR, MSG_HEADER_ROLES, MSG_HEADER_VALID
from pyon.core.bootstrap import get_service_registry
from pyon.core.governance import get_system_actor
from pyon.ion.identifier import create_unique_resource_id, create_unique_association_id
from pyon.ion.resource import get_restype_lcsm
from pyon.public import CFG, log, BadRequest, Inconsistent, NotFound, IonObject, RT, OT, AS, LCS, named_any, get_safe, get_ion_ts, PRED
from ion.util.parse_utils import get_typed_value
# Well known action config keys
KEY_SCENARIO = "scenario"
KEY_ID = "id"
KEY_OWNER = "owner"
KEY_LCSTATE = "lcstate"
KEY_ORGS = "orgs"
# Well known aliases
ID_ORG_ION = "ORG_ION"
ID_SYSTEM_ACTOR = "USER_SYSTEM"
UUID_RE = '^[0-9a-fA-F]{32}$'
class Preloader(object):
def initialize_preloader(self, process, preload_cfg):
log.info("Initialize preloader")
self.process = process
self.preload_cfg = preload_cfg or {}
self._init_preload()
self.rr = self.process.container.resource_registry
self.bulk = self.preload_cfg.get("bulk", False) is True
# Loads internal bootstrapped resource ids that will be referenced during preload
self._load_system_ids()
# Load existing resources by preload ID
self._prepare_incremental()
def _init_preload(self):
self.obj_classes = {} # Cache of class for object types
self.object_definitions = None # Dict of preload rows before processing
self.resource_ids = {} # Holds a mapping of preload IDs to internal resource ids
self.resource_objs = {} # Holds a mapping of preload IDs to the actual resource objects
self.resource_assocs = {} # Holds a mapping of existing associations list by predicate
self.bulk_resources = {} # Keeps resource objects to be bulk inserted/updated
self.bulk_associations = {} # Keeps association objects to be bulk inserted/updated
        self.bulk_existing = set()           # This keeps the ids of the bulk objects to update instead of create
def preload_master(self, filename, skip_steps=None):
"""Executes a preload master file"""
log.info("Preloading from master file: %s", filename)
with open(filename, "r") as f:
master_yml = f.read()
master_cfg = yaml.load(master_yml)
if not "preload_type" in master_cfg or master_cfg["preload_type"] != "steps":
raise BadRequest("Invalid preload steps file")
for step in master_cfg["steps"]:
if skip_steps and step in skip_steps:
log.info("Skipping step %s" % step)
continue
step_filename = "%s/%s.yml" % (os.path.dirname(filename), step)
self._execute_step(step_filename)
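    # Illustrative sketch (assumption, not from the source): a master file as parsed
    # above is expected to look roughly like this, with one step name per entry and a
    # matching <step>.yml file located next to the master file:
    #
    #   preload_type: steps
    #   steps:
    #     - base_resources
    #     - demo_resources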
def _execute_step(self, filename):
"""Executes a preload step file"""
with open(filename, "r") as f:
step_yml = f.read()
step_cfg = yaml.safe_load(step_yml)
if not "preload_type" in step_cfg or step_cfg["preload_type"] != "actions":
raise BadRequest("Invalid preload actions file")
for action in step_cfg["actions"]:
try:
self._execute_action(action)
except Exception as ex:
log.warn("Action failed: " + str(ex), exc_info=True)
self.commit_bulk()
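    # Illustrative sketch (assumption, not from the source): a step file as parsed
    # above carries a list of action dicts; each "action" value is "<scope>:<func_type>"
    # and is dispatched to a _load_<scope>_<func_type> method or a configured plugin:
    #
    #   preload_type: actions
    #   actions:
    #     - action: resource:org
    #       id: ORG_ENG
    #       resource:
    #         name: Engineering
    #     - action: resource:actor
    #       id: USER_JDOE
    #       owner: USER_SYSTEM
    #
    # The scope/function names and ids shown here are hypothetical examples.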
def _execute_action(self, action):
"""Executes a preload action"""
action_type = action["action"]
#log.debug("Preload action %s id=%s", action_type, action.get("id", ""))
scope, func_type = action_type.split(":", 1)
default_funcname = "_load_%s_%s" % (scope, func_type)
action_func = getattr(self.process, default_funcname, None)
if not action_func:
action_funcname = self.preload_cfg["action_plugins"].get(action_type, {})
if not action_funcname:
log.warn("Unknown action: %s", action_type)
return
action_func = getattr(self.process, action_funcname, None)
if not action_func:
log.warn("Action function %s not found for action %s", action_funcname, action_type)
return
action_func(action)
# -------------------------------------------------------------------------
def _load_system_ids(self):
"""Read some system objects for later reference"""
org_objs, _ = self.rr.find_resources(name="ION", restype=RT.Org, id_only=False)
if not org_objs:
raise BadRequest("ION org not found. Was system force_cleaned since bootstrap?")
ion_org_id = org_objs[0]._id
self._register_id(ID_ORG_ION, ion_org_id, org_objs[0])
system_actor = get_system_actor()
system_actor_id = system_actor._id if system_actor else 'anonymous'
self._register_id(ID_SYSTEM_ACTOR, system_actor_id, system_actor if system_actor else None)
def _prepare_incremental(self):
"""
Look in the resource registry for any resources that have a preload ID on them so that
they can be referenced under this preload ID during this load run.
"""
log.debug("Loading prior preloaded resources for reference")
res_objs, res_keys = self.rr.find_resources_ext(alt_id_ns="PRE", id_only=False)
res_preload_ids = [key['alt_id'] for key in res_keys]
res_ids = [obj._id for obj in res_objs]
log.debug("Found %s previously preloaded resources", len(res_objs))
res_assocs = self.rr.find_associations(predicate="*", id_only=False)
[self.resource_assocs.setdefault(assoc["p"], []).append(assoc) for assoc in res_assocs]
log.debug("Found %s existing associations", len(res_assocs))
existing_resources = dict(zip(res_preload_ids, res_objs))
if len(existing_resources) != len(res_objs):
raise BadRequest("Stored preload IDs are NOT UNIQUE!!! Cannot link to old resources")
res_id_mapping = dict(zip(res_preload_ids, res_ids))
self.resource_ids.update(res_id_mapping)
res_obj_mapping = dict(zip(res_preload_ids, res_objs))
self.resource_objs.update(res_obj_mapping)
def create_object_from_cfg(self, cfg, objtype, key="resource", prefix="", existing_obj=None):
"""
Construct an IonObject of a determined type from given config dict with attributes.
Convert all attributes according to their schema target type. Supports nested objects.
Supports edit of objects of same type.
"""
log.trace("Create object type=%s, prefix=%s", objtype, prefix)
if objtype == "dict":
schema = None
else:
schema = self._get_object_class(objtype)._schema
obj_fields = {} # Attributes for IonObject creation as dict
nested_done = set() # Names of attributes with nested objects already created
obj_cfg = get_safe(cfg, key)
for subkey, value in obj_cfg.iteritems():
if subkey.startswith(prefix):
attr = subkey[len(prefix):]
if '.' in attr: # We are a parent entry
# TODO: Make sure to not create nested object multiple times
slidx = attr.find('.')
nested_obj_field = attr[:slidx]
parent_field = attr[:slidx+1]
nested_prefix = prefix + parent_field # prefix plus nested object name
if '[' in nested_obj_field and nested_obj_field[-1] == ']':
sqidx = nested_obj_field.find('[')
nested_obj_type = nested_obj_field[sqidx+1:-1]
nested_obj_field = nested_obj_field[:sqidx]
elif objtype == "dict":
nested_obj_type = "dict"
else:
nested_obj_type = schema[nested_obj_field]['type']
# Make sure to not create the same nested object twice
if parent_field in nested_done:
continue
# Support direct indexing in a list
list_idx = -1
if nested_obj_type.startswith("list/"):
_, list_idx, nested_obj_type = nested_obj_type.split("/")
list_idx = int(list_idx)
log.trace("Get nested object field=%s type=%s, prefix=%s", nested_obj_field, nested_obj_type, prefix)
nested_obj = self.create_object_from_cfg(cfg, nested_obj_type, key, nested_prefix)
if list_idx >= 0:
my_list = obj_fields.setdefault(nested_obj_field, [])
if list_idx >= len(my_list):
my_list[len(my_list):list_idx] = [None]*(list_idx-len(my_list)+1)
my_list[list_idx] = nested_obj
else:
obj_fields[nested_obj_field] = nested_obj
nested_done.add(parent_field)
elif objtype == "dict":
# TODO: What about type?
obj_fields[attr] = value
elif attr in schema: # We are the leaf attribute
try:
if value:
fieldvalue = get_typed_value(value, schema[attr])
obj_fields[attr] = fieldvalue
except Exception:
log.warn("Object type=%s, prefix=%s, field=%s cannot be converted to type=%s. Value=%s",
objtype, prefix, attr, schema[attr]['type'], value, exc_info=True)
#fieldvalue = str(fieldvalue)
else:
# warn about unknown fields just once -- not on each row
log.warn("Skipping unknown field in %s: %s%s", objtype, prefix, attr)
if objtype == "dict":
obj = obj_fields
else:
if existing_obj:
# Edit attributes
if existing_obj.type_ != objtype:
raise Inconsistent("Cannot edit resource. Type mismatch old=%s, new=%s" % (existing_obj.type_, objtype))
# TODO: Don't edit empty nested attributes
for attr in list(obj_fields.keys()):
if not obj_fields[attr]:
del obj_fields[attr]
for attr in ('alt_ids','_id','_rev','type_'):
if attr in obj_fields:
del obj_fields[attr]
existing_obj.__dict__.update(obj_fields)
log.trace("Update object type %s using field names %s", objtype, obj_fields.keys())
obj = existing_obj
else:
if cfg.get(KEY_ID, None) and 'alt_ids' in schema:
if 'alt_ids' in obj_fields:
obj_fields['alt_ids'].append("PRE:"+cfg[KEY_ID])
else:
obj_fields['alt_ids'] = ["PRE:"+cfg[KEY_ID]]
log.trace("Create object type %s from field names %s", objtype, obj_fields.keys())
obj = IonObject(objtype, **obj_fields)
return obj
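    # Illustrative sketch (assumption, not from the source): given the prefix handling
    # above, a config entry could describe a resource with nested attributes like this,
    # where "contact." marks a nested object and the leaf keys are converted using the
    # object schema:
    #
    #   cfg = {
    #       "id": "ORG_ENG",
    #       "resource": {
    #           "name": "Engineering",
    #           "contact.individual_names_given": "Jane",
    #           "contact.email": "jane@example.com",
    #       },
    #   }
    #   obj = self.create_object_from_cfg(cfg, "Org")
    #
    # The "Org" type and its "contact" attribute are assumptions for illustration.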
def _get_object_class(self, objtype):
if objtype in self.obj_classes:
return self.obj_classes[objtype]
try:
obj_class = named_any("interface.objects.%s" % objtype)
self.obj_classes[objtype] = obj_class
return obj_class
except Exception:
log.error('failed to find class for type %s' % objtype)
def _get_service_client(self, service):
return get_service_registry().services[service].client(process=self.process)
def _register_id(self, alias, resid, res_obj=None, is_update=False):
"""Keep preload resource in internal dict for later reference"""
if not is_update and alias in self.resource_ids:
raise BadRequest("ID alias %s used twice" % alias)
self.resource_ids[alias] = resid
self.resource_objs[alias] = res_obj
log.trace("Added resource alias=%s to id=%s", alias, resid)
def _read_resource_id(self, res_id):
existing_obj = self.rr.read(res_id)
self.resource_objs[res_id] = existing_obj
self.resource_ids[res_id] = res_id
return existing_obj
def _get_resource_id(self, alias_id):
"""Returns resource ID from preload alias ID, scanning also for real resource IDs to be loaded"""
if alias_id in self.resource_ids:
return self.resource_ids[alias_id]
elif re.match(UUID_RE, alias_id):
# This is obviously an ID of a real resource - let it fail if not existing
self._read_resource_id(alias_id)
log.debug("Referencing existing resource via direct ID: %s", alias_id)
return alias_id
else:
raise KeyError(alias_id)
def _get_resource_obj(self, res_id, silent=False):
"""Returns a resource object from one of the memory locations for given preload or internal ID"""
if self.bulk and res_id in self.bulk_resources:
return self.bulk_resources[res_id]
elif res_id in self.resource_objs:
return self.resource_objs[res_id]
else:
# Real ID not alias - reverse lookup
alias_ids = [alias_id for alias_id,int_id in self.resource_ids.iteritems() if int_id==res_id]
if alias_ids:
return self.resource_objs[alias_ids[0]]
if not silent:
log.debug("_get_resource_obj(): No object found for '%s'", res_id)
return None
def _resource_exists(self, res_id):
if not res_id:
return None
res = self._get_resource_obj(res_id, silent=True)
return res is not None
def _has_association(self, sub, pred, obj):
"""Returns True if the described associated already exists."""
for assoc in self.resource_assocs.get(pred, []):
if assoc.s == sub and assoc.o == obj:
return True
return False
def _update_resource_obj(self, res_id):
"""Updates an existing resource object"""
res_obj = self._get_resource_obj(res_id)
self.rr.update(res_obj)
log.debug("Updating resource %s (pre=%s id=%s): '%s'", res_obj.type_, res_id, res_obj._id, res_obj.name)
def _get_alt_id(self, res_obj, prefix):
alt_ids = getattr(res_obj, 'alt_ids', [])
for alt_id in alt_ids:
if alt_id.startswith(prefix+":"):
alt_id_str = alt_id[len(prefix)+1:]
return alt_id_str
def _get_op_headers(self, owner_id, force_user=False):
headers = {}
if owner_id:
owner_id = self.resource_ids[owner_id]
headers[MSG_HEADER_ACTOR] = owner_id
headers[MSG_HEADER_ROLES] = {'ION': ['SUPERUSER', 'MODERATOR']}
headers[MSG_HEADER_VALID] = '0'
elif force_user:
return self._get_system_actor_headers()
return headers
def _get_system_actor_headers(self):
return {MSG_HEADER_ACTOR: self.resource_ids[ID_SYSTEM_ACTOR],
MSG_HEADER_ROLES: {'ION': ['SUPERUSER', 'MODERATOR']},
MSG_HEADER_VALID: '0'}
def basic_resource_create(self, cfg, restype, svcname, svcop, key="resource",
set_attributes=None, support_bulk=False, **kwargs):
"""
Orchestration method doing the following:
- create an object from a row,
- add any defined constraints,
- make a service call to create resource for given object,
- share resource in a given Org
- store newly created resource id and obj for future reference
- (optional) support bulk create/update
"""
res_id_alias = cfg[KEY_ID]
existing_obj = None
if res_id_alias in self.resource_ids:
# TODO: Catch case when ID used twice
existing_obj = self.resource_objs[res_id_alias]
elif re.match(UUID_RE, res_id_alias):
# This is obviously an ID of a real resource
try:
existing_obj = self._read_resource_id(res_id_alias)
log.debug("Updating existing resource via direct ID: %s", res_id_alias)
except NotFound as nf:
pass # Ok it was not there after all
try:
res_obj = self.create_object_from_cfg(cfg, restype, key, "", existing_obj=existing_obj)
except Exception as ex:
log.exception("Error creating object")
raise
if set_attributes:
for attr, attr_val in set_attributes.iteritems():
setattr(res_obj, attr, attr_val)
if existing_obj:
res_id = self.resource_ids[res_id_alias]
if self.bulk and support_bulk:
self.bulk_resources[res_id] = res_obj
self.bulk_existing.add(res_id) # Make sure to remember which objects are existing
else:
# TODO: Use the appropriate service call here
self.rr.update(res_obj)
else:
if self.bulk and support_bulk:
res_id = self._create_bulk_resource(res_obj, res_id_alias)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None))
self._resource_assign_owner(headers, res_obj)
self._resource_advance_lcs(cfg, res_id)
else:
svc_client = self._get_service_client(svcname)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None), force_user=True)
res_id = getattr(svc_client, svcop)(res_obj, headers=headers, **kwargs)
if res_id:
if svcname == "resource_registry" and svcop == "create":
res_id = res_id[0]
res_obj._id = res_id
self._register_id(res_id_alias, res_id, res_obj)
self._resource_assign_org(cfg, res_id)
return res_id
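    # Illustrative sketch (assumption, not from the source): basic_resource_create is
    # typically driven by an action cfg combining the well-known keys defined at the
    # top of this module (id, owner, orgs, lcstate) with the resource attributes:
    #
    #   cfg = {
    #       "id": "INSTRUMENT_01",
    #       "owner": "USER_SYSTEM",
    #       "orgs": "ORG_ION",
    #       "lcstate": "DEPLOYED_AVAILABLE",
    #       "resource": {"name": "CTD instrument"},
    #   }
    #   res_id = self.basic_resource_create(cfg, "InstrumentDevice",
    #                                       "instrument_management",
    #                                       "create_instrument_device")
    #
    # The resource type, service name and operation name are hypothetical examples.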
def _create_bulk_resource(self, res_obj, res_alias=None):
if not hasattr(res_obj, "_id"):
res_obj._id = create_unique_resource_id()
ts = get_ion_ts()
if hasattr(res_obj, "ts_created") and not res_obj.ts_created:
res_obj.ts_created = ts
if hasattr(res_obj, "ts_updated") and not res_obj.ts_updated:
res_obj.ts_updated = ts
res_id = res_obj._id
self.bulk_resources[res_id] = res_obj
if res_alias:
self._register_id(res_alias, res_id, res_obj)
return res_id
def _resource_advance_lcs(self, cfg, res_id):
"""
Change lifecycle state of object to requested state. Supports bulk.
"""
res_obj = self._get_resource_obj(res_id)
restype = res_obj.type_
lcsm = get_restype_lcsm(restype)
initial_lcmat = lcsm.initial_state if lcsm else LCS.DEPLOYED
initial_lcav = lcsm.initial_availability if lcsm else AS.AVAILABLE
lcstate = cfg.get(KEY_LCSTATE, None)
if lcstate:
row_lcmat, row_lcav = lcstate.split("_", 1)
if self.bulk and res_id in self.bulk_resources:
self.bulk_resources[res_id].lcstate = row_lcmat
self.bulk_resources[res_id].availability = row_lcav
else:
if row_lcmat != initial_lcmat: # Vertical transition
self.rr.set_lifecycle_state(res_id, row_lcmat)
if row_lcav != initial_lcav: # Horizontal transition
self.rr.set_lifecycle_state(res_id, row_lcav)
elif self.bulk and res_id in self.bulk_resources:
# Set the lcs to resource type appropriate initial values
self.bulk_resources[res_id].lcstate = initial_lcmat
self.bulk_resources[res_id].availability = initial_lcav
def _resource_assign_org(self, cfg, res_id):
"""
Shares the resource in the given orgs. Supports bulk.
"""
org_ids = cfg.get(KEY_ORGS, None)
if org_ids:
org_ids = get_typed_value(org_ids, targettype="simplelist")
for org_id in org_ids:
org_res_id = self.resource_ids[org_id]
if self.bulk and res_id in self.bulk_resources:
# Note: org_id is alias, res_id is internal ID
org_obj = self._get_resource_obj(org_id)
res_obj = self._get_resource_obj(res_id)
# Create association to given Org
assoc_obj = self._create_association(org_obj, PRED.hasResource, res_obj, support_bulk=True)
else:
svc_client = self._get_service_client("org_management")
svc_client.share_resource(org_res_id, res_id, headers=self._get_system_actor_headers())
def _resource_assign_owner(self, headers, res_obj):
if self.bulk and 'ion-actor-id' in headers:
owner_id = headers['ion-actor-id']
user_obj = self._get_resource_obj(owner_id)
if owner_id and owner_id != 'anonymous':
self._create_association(res_obj, PRED.hasOwner, user_obj, support_bulk=True)
def basic_associations_create(self, cfg, res_alias, support_bulk=False):
for assoc in cfg.get("associations", []):
direction, other_id, predicate = assoc.split(",")
res_id = self.resource_ids[res_alias]
other_res_id = self.resource_ids[other_id]
if direction == "TO":
self._create_association(res_id, predicate, other_res_id, support_bulk=support_bulk)
elif direction == "FROM":
self._create_association(other_res_id, predicate, res_id, support_bulk=support_bulk)
def _create_association(self, subject=None, predicate=None, obj=None, support_bulk=False):
"""
Create an association between two IonObjects with a given predicate.
Supports bulk mode
"""
if self.bulk and support_bulk:
if not subject or not predicate or not obj:
raise BadRequest("Association must have all elements set: %s/%s/%s" % (subject, predicate, obj))
if isinstance(subject, basestring):
subject = self._get_resource_obj(subject)
if "_id" not in subject:
raise BadRequest("Subject id not available")
subject_id = subject._id
st = subject.type_
if isinstance(obj, basestring):
obj = self._get_resource_obj(obj)
if "_id" not in obj:
raise BadRequest("Object id not available")
object_id = obj._id
ot = obj.type_
assoc_id = create_unique_association_id()
assoc_obj = IonObject("Association",
s=subject_id, st=st,
p=predicate,
o=object_id, ot=ot,
ts=get_ion_ts())
assoc_obj._id = assoc_id
self.bulk_associations[assoc_id] = assoc_obj
return assoc_id, '1-norev'
else:
return self.rr.create_association(subject, predicate, obj)
def commit_bulk(self):
if not self.bulk_resources and not self.bulk_associations:
return
# Perform the create for resources
res_new = [obj for obj in self.bulk_resources.values() if obj["_id"] not in self.bulk_existing]
res = self.rr.rr_store.create_mult(res_new, allow_ids=True)
# Perform the update for resources
res_upd = [obj for obj in self.bulk_resources.values() if obj["_id"] in self.bulk_existing]
res = self.rr.rr_store.update_mult(res_upd)
# Perform the create for associations
assoc_new = [obj for obj in self.bulk_associations.values()]
res = self.rr.rr_store.create_mult(assoc_new, allow_ids=True)
log.info("Bulk stored {} resource objects ({} updates) and {} associations".format(len(res_new), len(res_upd), len(assoc_new)))
self.bulk_resources.clear()
self.bulk_associations.clear()
self.bulk_existing.clear()
| bsd-2-clause | -1,043,299,870,787,946,800 | 44.288355 | 135 | 0.572303 | false |
stormi/tsunami | outils/gestionnaire_module/commande.py | 1 | 3556 | # -*-coding:Utf-8 -*
# Copyright (c) 2011 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .divers import *
CMD_MESSAGE = """
\"""Package contenant les commandes du module {module}.\"""
"""
CONTENU_CMD = """
\"""Package contenant la commande '{lcommande}'.
\"""
from primaires.interpreteur.commande.commande import Commande
class Cmd{commande}(Commande):
\"""Commande '{lcommande}'.
\"""
def __init__(self):
\"""Constructeur de la commande\"""
Commande.__init__(self, "{lcommande}", "{commande_en}")
self.nom_categorie = "{categorie}"
self.schema = "{schema}"
self.aide_courte = "TODO"
self.aide_longue = \\
"TODO"
def interpreter(self, personnage, dic_masques):
\"""Interprétation de la commande\"""
pass
"""
def ajouter(rep, module, typeMod, entete, commande):
if len(commande) < 2:
print("Pas assez d'argument")
return
commande_fr = commande[1].lower()
commande_en = commande_fr
schema = ""
categorie = ""
if len(commande) > 2:
commande_en = commande[2]
if len(commande) > 3:
categorie = commande[3]
if len(commande) > 4:
schema = commande[4]
contenu = CONTENU_CMD.format(
lcommande=commande_fr,
commande=commande_fr.capitalize(),
commande_en=commande_en,
categorie = categorie,
schema = schema)
repcmd = rep + "commandes/" + commande_fr + "/"
os.makedirs(repcmd)
path = repcmd + "__init__.py"
if os.path.exists(path):
print("Une commande portant ce nom existait, annulation")
return
write(path, entete + contenu)
path = rep + "commandes/" + "__init__.py"
if not os.path.exists(path):
write(path, entete + CMD_MESSAGE.format(module=module))
append(path, "from . import {commande}\n".format(commande=commande_fr))
print("ATTENTION : vous devze modifié le __init__.py du module " \
"pour y rajouter cette commande")
| bsd-3-clause | 6,856,015,116,057,477,000 | 31.605505 | 79 | 0.664603 | false |
catapult-project/catapult | common/py_vulcanize/third_party/rjsmin/bench/write.py | 3 | 10657 | #!/usr/bin/env python
# -*- coding: ascii -*-
r"""
=========================
Write benchmark results
=========================
Write benchmark results.
:Copyright:
Copyright 2014 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage::
python -mbench.write [-p plain] [-t table] <pickled
-p plain Plain file to write to (like docs/BENCHMARKS).
-t table Table file to write to (like docs/_userdoc/benchmark.txt).
"""
from __future__ import print_function
if __doc__:
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
import os as _os
import re as _re
import sys as _sys
try:
unicode
except NameError:
def uni(v):
if hasattr(v, 'decode'):
return v.decode('latin-1')
return str(v)
else:
def uni(v):
if isinstance(v, unicode):
return v.encode('utf-8')
return str(v)
def write_table(filename, results):
"""
Output tabled benchmark results
:Parameters:
`filename` : ``str``
Filename to write to
`results` : ``list``
Results
"""
try:
next
except NameError:
next = lambda i: (getattr(i, 'next', None) or i.__next__)()
try:
cmp
except NameError:
cmp = lambda a, b: (a > b) - (a < b)
names = [
('simple_port', 'Simple Port'),
('jsmin_2_0_9', 'jsmin 2.0.9'),
('rjsmin', '|rjsmin|'),
('_rjsmin', r'_\ |rjsmin|'),
]
benched_per_table = 2
results = sorted(results, reverse=True)
# First we transform our data into a table (list of lists)
pythons, widths = [], [0] * (benched_per_table + 1)
last_version = None
for version, _, result in results:
version = uni(version)
if not(last_version is None or version.startswith('2.')):
continue
last_version = version
namesub = _re.compile(r'(?:-\d+(?:\.\d+)*)?\.js$').sub
result = iter(result)
tables = []
# given our data it's easier to create the table transposed...
for benched in result:
rows = [['Name'] + [desc for _, desc in names]]
for _ in range(benched_per_table):
if _:
try:
benched = next(result)
except StopIteration:
rows.append([''] + ['' for _ in names])
continue
times = dict((
uni(port), (time, benched['sizes'][idx])
) for idx, (port, time) in enumerate(benched['times']))
columns = ['%s (%.1f)' % (
namesub('', _os.path.basename(uni(benched['filename']))),
benched['size'] / 1024.0,
)]
for idx, (port, _) in enumerate(names):
if port not in times:
columns.append('n/a')
continue
time, size = times[port]
if time is None:
columns.append('(failed)')
continue
columns.append('%s%.2f ms (%.1f %s)' % (
idx == 0 and ' ' or '',
time,
size / 1024.0,
idx == 0 and '\\*' or ['=', '>', '<'][
cmp(size, benched['sizes'][0])
],
))
rows.append(columns)
# calculate column widths (global for all tables)
for idx, row in enumerate(rows):
widths[idx] = max(widths[idx], max(map(len, row)))
# ... and transpose it back.
tables.append(zip(*rows))
pythons.append((version, tables))
if last_version.startswith('2.'):
break
# Second we create a rest table from it
lines = []
separator = lambda c='-': '+'.join([''] + [
c * (width + 2) for width in widths
] + [''])
for idx, (version, tables) in enumerate(pythons):
if idx:
lines.append('')
lines.append('')
line = 'Python %s' % (version,)
lines.append(line)
lines.append('~' * len(line))
for table in tables:
lines.append('')
lines.append('.. rst-class:: benchmark')
lines.append('')
for idx, row in enumerate(table):
if idx == 0:
# header
lines.append(separator())
lines.append('|'.join([''] + [
' %s%*s ' % (col, len(col) - width, '')
for width, col in zip(widths, row)
] + ['']))
lines.append(separator('='))
else: # data
lines.append('|'.join([''] + [
j == 0 and (
' %s%*s ' % (col, len(col) - widths[j], '')
) or (
['%*s ', ' %*s '][idx == 1] % (widths[j], col)
)
for j, col in enumerate(row)
] + ['']))
lines.append(separator())
fplines = []
fp = open(filename)
try:
fpiter = iter(fp)
for line in fpiter:
line = line.rstrip()
if line == '.. begin tables':
buf = []
for line in fpiter:
line = line.rstrip()
if line == '.. end tables':
fplines.append('.. begin tables')
fplines.append('')
fplines.extend(lines)
fplines.append('')
fplines.append('.. end tables')
buf = []
break
else:
buf.append(line)
else:
fplines.extend(buf)
_sys.stderr.write("Placeholder container not found!\n")
else:
fplines.append(line)
finally:
fp.close()
fp = open(filename, 'w')
try:
fp.write('\n'.join(fplines) + '\n')
finally:
fp.close()
def write_plain(filename, results):
"""
Output plain benchmark results
:Parameters:
`filename` : ``str``
Filename to write to
`results` : ``list``
Results
"""
lines = []
results = sorted(results, reverse=True)
for idx, (version, import_notes, result) in enumerate(results):
if idx:
lines.append('')
lines.append('')
lines.append('$ python%s -OO bench/main.py bench/*.js' % (
'.'.join(version.split('.')[:2])
))
lines.append('~' * 72)
for note in import_notes:
lines.append(uni(note))
lines.append('Python Release: %s' % (version,))
for single in result:
lines.append('')
lines.append('Benchmarking %r... (%.1f KiB)' % (
uni(single['filename']), single['size'] / 1024.0
))
for msg in single['messages']:
lines.append(msg)
times = []
space = max([len(uni(port)) for port, _ in single['times']])
for idx, (port, time) in enumerate(single['times']):
port = uni(port)
if time is None:
lines.append(" FAILED %s" % (port,))
else:
times.append(time)
lines.append(
" Timing %s%s ... (%5.1f KiB %s) %8.2f ms" % (
port,
" " * (space - len(port)),
single['sizes'][idx] / 1024.0,
idx == 0 and '*' or ['=', '>', '<'][
cmp(single['sizes'][idx], single['sizes'][0])
],
time
)
)
if len(times) > 1:
lines[-1] += " (factor: %s)" % (', '.join([
'%.2f' % (timed / time) for timed in times[:-1]
]))
lines.append('')
lines.append('')
lines.append('# vim: nowrap')
fp = open(filename, 'w')
try:
fp.write('\n'.join(lines) + '\n')
finally:
fp.close()
def main(argv=None):
""" Main """
import getopt as _getopt
import pickle as _pickle
if argv is None:
argv = _sys.argv[1:]
try:
opts, args = _getopt.getopt(argv, "hp:t:", ["help"])
    except _getopt.GetoptError:
e = _sys.exc_info()[0](_sys.exc_info()[1])
print(
"%s\nTry %s -mbench.write --help" % (
e,
_os.path.basename(_sys.executable),
), file=_sys.stderr)
_sys.exit(2)
plain, table = None, None
for key, value in opts:
if key in ("-h", "--help"):
print(
"%s -mbench.write [-p plain] [-t table] <pickled" % (
_os.path.basename(_sys.executable),
), file=_sys.stderr)
_sys.exit(0)
elif key == '-p':
plain = str(value)
elif key == '-t':
table = str(value)
struct = []
_sys.stdin = getattr(_sys.stdin, 'detach', lambda: _sys.stdin)()
try:
while True:
version, import_notes, result = _pickle.load(_sys.stdin)
if hasattr(version, 'decode'):
version = version.decode('latin-1')
struct.append((version, import_notes, result))
except EOFError:
pass
if plain:
write_plain(plain, struct)
if table:
write_table(table, struct)
if __name__ == '__main__':
main()
| bsd-3-clause | -5,912,417,761,413,347,000 | 29.800578 | 77 | 0.449282 | false |
Funjando/Birthday-quiz | birthday.py | 1 | 2316 | """
birthday.py
Author: Andreas
Credit: Dan, Payton, tiggerntatie
Assignment:
Your program will ask the user the following questions, in this order:
1. Their name.
2. The name of the month they were born in (e.g. "September").
3. The year they were born in (e.g. "1962").
4. The day they were born on (e.g. "11").
If the user's birthday fell on October 31, then respond with:
You were born on Halloween!
If the user's birthday fell on today's date, then respond with:
Happy birthday!
Otherwise respond with a statement like this:
Peter, you are a winter baby of the nineties.
Example Session
Hello, what is your name? Eric
Hi Eric, what was the name of the month you were born in? September
And what year were you born in, Eric? 1972
And the day? 11
Eric, you are a fall baby of the stone age.
"""
#imports
from datetime import datetime
from calendar import month_name
todaymonth = datetime.today().month
todaydate = datetime.today().day
month=month_name[todaymonth]
#Datalists
winter=['December', 'January', 'February']
spring=['March', 'April', 'May']
summer=['June', 'July', 'August']
fall=['September', 'October', 'November']
eighties=list(range(1980, 1990))
nineties=list(range(1990, 2000))
two_thousands=list(range(2000, 2100))
Stone_Age=list(range(0, 1980))
#Inputs
username=input("Hello, what is your name? ")
birthmonth=input("Hi " + username + ", what was the name of the month you were born in? ")
birthyear=int(input("And what year were you born in, " + username + "? "))
birthday=int(input("And the day? " ))
#Code
if birthmonth=="October" and birthday==31:
print("You were born on Halloween! ")
elif birthmonth==month and birthday==todaydate:
print("Happy birthday! ")
else:
if birthmonth in winter:
season="winter"
if birthmonth in spring:
season="spring"
if birthmonth in summer:
season="summer"
if birthmonth in fall:
season="fall"
if birthyear in eighties:
aeon="eighties"
if birthyear in nineties:
aeon="nineties"
if birthyear in two_thousands:
aeon="two thousands"
if birthyear in Stone_Age:
aeon="Stone Age"
s="{0}, you are a {1} baby of the {2}."
print(s.format(username, season, aeon))
| mit | 3,509,998,420,052,973,000 | 22.876289 | 90 | 0.667962 | false |
bndl/bndl | bndl/net/watchdog.py | 1 | 7044 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio.futures import CancelledError
from datetime import datetime
from random import random
import asyncio
import atexit
import logging
from bndl.net.messages import Ping
logger = logging.getLogger(__name__)
# The time in seconds between checking connections
WATCHDOG_INTERVAL = 2
# allow at most 10 connection attempts
# after that, drop the peer connection from the
# peer table
MAX_CONNECTION_ATTEMPT = 10
# The maximum time in seconds with no communication
# after which a ping is sent
DT_PING_AFTER = 60
# The maximum time in seconds with no communication
# after which the connection is considered lost
DT_MAX_INACTIVE = DT_PING_AFTER * 2
class PeerStats(object):
def __init__(self, peer):
self.peer = peer
self.connection_attempts = 0
self.last_update = datetime.now()
self.last_reconnect = None
self.error_since = None
self.bytes_sent = 0
self.bytes_sent_rate = 0
self.bytes_received = 0
self.bytes_received_rate = 0
def update(self):
now = datetime.now()
interval = (now - self.last_update).total_seconds()
self.last_update = now
if not self.peer.is_connected and self.peer.connected_on is not None:
if not self.error_since:
logger.info('%r disconnected', self.peer)
self.error_since = self.error_since or now
self.bytes_sent_rate = 0
self.bytes_received_rate = 0
return
# calculate tx and rx rates
if self.peer.is_connected:
self.bytes_sent_rate = (self.peer.conn.bytes_sent - self.bytes_sent) / interval
self.bytes_sent = self.peer.conn.bytes_sent
self.bytes_received_rate = (self.peer.conn.bytes_received - self.bytes_received) / interval
self.bytes_received = self.peer.conn.bytes_received
if self.peer.last_rx and (now - self.peer.last_rx).total_seconds() > DT_MAX_INACTIVE:
if not self.error_since:
logger.info('%r is inactive for more than %s seconds (%s)', self.peer,
DT_MAX_INACTIVE, now - self.peer.last_rx)
self.error_since = self.error_since or now
else:
if self.error_since:
logger.info('%s recovered', self.peer)
# clear error stats
self.connection_attempts = 0
self.error_since = None
def __str__(self):
if self.error_since:
fmt = '{peer.name} error since {error_since}'
else:
            fmt = '{peer.name} communicating at {bytes_received_rate:.2f} B/s rx, {bytes_sent_rate:.2f} B/s tx'
return fmt.format_map(self.__dict__)
class Watchdog(object):
def __init__(self, node):
self.node = node
self._peer_stats = {}
self.monitor_task = None
atexit.register(self.stop)
def start(self):
self.monitor_task = self.node.loop.create_task(self._monitor())
def stop(self):
self.monitor_task = None
def peer_stats(self, peer):
stats = self._peer_stats.get(peer)
if not stats:
self._peer_stats[peer] = stats = PeerStats(peer)
return stats
@asyncio.coroutine
def _monitor(self):
try:
while self.monitor_task and self.node.running:
yield from self._check()
yield from asyncio.sleep(WATCHDOG_INTERVAL, loop=self.node.loop) # @UndefinedVariable
except CancelledError:
pass
@asyncio.coroutine
def _ping(self, peer):
try:
yield from peer.send(Ping())
except Exception:
self.peer_stats(peer).update()
logger.warning('Unable to send ping to peer %r', peer, exc_info=True)
@asyncio.coroutine
def _check(self):
for name in list(self.node.peers.keys()):
try:
# check a connection with a peer
yield from self._check_peer(name)
except CancelledError:
raise
except Exception:
logger.exception('unable to check peer %s of %s', self.node.name, name)
# if no nodes are connected, attempt to connect with the seeds
if not any(peer.is_connected for peer in self.node.peers.values()):
yield from self.node._connect_seeds()
@asyncio.coroutine
def _check_peer(self, name):
try:
peer = self.node.peers[name]
except KeyError:
return
if peer.name != name:
logger.info('Peer %s of node %s registered under %s, updating registration',
peer.name, self.node.name, name)
peer = self.node.peers.pop(name)
self.node.peers[name] = peer
stats = self.peer_stats(peer)
stats.update()
if stats.connection_attempts > MAX_CONNECTION_ATTEMPT:
popped = self.node.peers.pop(name)
if popped != peer:
self.node.peers[name] = popped
yield from peer.disconnect('disconnected by watchdog after %s failed connection attempts',
stats.connection_attempts)
elif stats.error_since:
# max reconnect interval is:
# - twice the watch_dog interval (maybe something was missed)
# - exponentially to the connection attempts (exponentially back off)
# - with a random factor between 1 +/- .25
now = datetime.now()
connect_wait = WATCHDOG_INTERVAL * 2 ** stats.connection_attempts * (.75 + random() / 2)
if (now - stats.error_since).total_seconds() > WATCHDOG_INTERVAL * 2 and \
(not stats.last_reconnect or (now - stats.last_reconnect).total_seconds() > connect_wait):
stats.connection_attempts += 1
stats.last_reconnect = now
yield from peer.connect()
elif peer.is_connected and \
peer.last_rx and \
(datetime.now() - peer.last_rx).total_seconds() > DT_PING_AFTER:
yield from self._ping(peer)
def rxtx_stats(self):
stats = dict(
bytes_sent=0,
bytes_sent_rate=0,
bytes_received=0,
bytes_received_rate=0
)
for peer_stats in self._peer_stats.values():
for k in stats.keys():
stats[k] += getattr(peer_stats, k, 0)
return stats
| apache-2.0 | -6,064,973,426,037,575,000 | 32.542857 | 109 | 0.59753 | false |
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/sqlite.py | 1 | 1466 | """
.. function:: sqlite(dbfilename, query:None)
Connects to an SQLite DB and returns the results of query.
Examples:
>>> sql("select * from (sqlite 'testdb.db' select 5 as num, 'test' as text);")
num | text
-----------
5 | test
"""
import functions
import os
import vtbase
registered = True
external_query = True
class SQLite(vtbase.VT):
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
query = dictargs['query']
if len(largs) > 0:
sqdb = largs[0]
if 'db' in dictargs:
sqdb = dictargs['db']
sqdb = str(os.path.abspath(os.path.expandvars(os.path.expanduser(os.path.normcase(sqdb)))))
conn = functions.Connection(sqdb)
cur = conn.cursor()
cur.execute(query)
yield cur.getdescriptionsafe()
while True:
yield cur.next()
cur.close()
def Source():
return vtbase.VTGenerator(SQLite)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| mit | 839,091,539,181,260,900 | 19.647887 | 99 | 0.583902 | false |
shibanis1/spark-tk | python/sparktk/frame/ops/drop_rows.py | 1 | 1541 | from sparktk.frame.row import Row
def drop_rows(self, predicate):
"""
Erase any row in the current frame which qualifies.
Parameters
----------
:param predicate: (UDF) Function which evaluates a row to a boolean; rows that answer True are dropped from
the frame.
Examples
--------
>>> frame = tc.frame.create([['Fred',39,16,'555-1234'],
... ['Susan',33,3,'555-0202'],
... ['Thurston',65,26,'555-4510'],
... ['Judy',44,14,'555-2183']],
... schema=[('name', str), ('age', int), ('tenure', int), ('phone', str)])
>>> frame.inspect()
[#] name age tenure phone
====================================
[0] Fred 39 16 555-1234
[1] Susan 33 3 555-0202
[2] Thurston 65 26 555-4510
[3] Judy 44 14 555-2183
>>> frame.drop_rows(lambda row: row.name[-1] == 'n') # drop people whose name ends in 'n'
>>> frame.inspect()
[#] name age tenure phone
================================
[0] Fred 39 16 555-1234
[1] Judy 44 14 555-2183
More information on a |UDF| can be found at :doc:`/ds_apir`.
"""
row = Row(self.schema)
def drop_rows_func(r):
row._set_data(r)
return not predicate(row)
self._python.rdd = self._python.rdd.filter(drop_rows_func)
| apache-2.0 | 887,973,931,321,488,600 | 33.244444 | 111 | 0.447761 | false |
qix/tooler | tooler/command.py | 1 | 1233 | import io
from typing import Dict, Optional
from .exceptions import CommandHelpException
from .parser import DefaultParser
class Command:
def __init__(self):
pass
def run(self, selector, argv):
raise Exception("not implemented")
class DecoratorCommand(Command):
def __init__(self, fn, doc=None, parser=None, shorthands: Optional[Dict[str, str]] = None):
# @todo: Should just take an actual `parser` object, but need to do a large
# refactor to fix that.
if parser:
assert not shorthands, "Shorthands option is not compatible with custom parser"
self.parser = parser()
else:
self.parser = DefaultParser(shorthands=shorthands)
self.fn = fn
self.doc = doc
def run(self, selector, argv):
try:
(args, vargs) = self.parser.parse(
self.fn,
self.doc,
selector,
argv
)
except CommandHelpException as e:
print(e.usage)
return
try:
return self.fn(*args, **vargs)
finally:
# Close any files that were opened as arguments
for value in [*args, *vargs.values()]:
# Skip as linter is not aware of `file` type
if isinstance(value, io.IOBase):
value.close()
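# Illustrative usage sketch (assumption, not part of the original source): a function
# wrapped by DecoratorCommand, with a shorthand mapping "-n" to "--name", might be
# driven roughly like this by the surrounding tooler framework:
#
#   def greet(name="world", shout=False):
#       print(("hello %s" % name).upper() if shout else "hello %s" % name)
#
#   cmd = DecoratorCommand(greet, doc="Greet someone", shorthands={"n": "name"})
#   cmd.run(selector=None, argv=["--name", "Ada", "--shout"])
#
# The exact argv syntax accepted depends on DefaultParser, which is defined elsewhere.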
| mit | -8,756,057,895,321,486,000 | 24.6875 | 93 | 0.632603 | false |
felixmatt/shyft | shyft/repository/service/yaml_geo_location_repository.py | 1 | 1391 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import yaml
from .ssa_geo_ts_repository import GeoLocationRepository
from os import path
class YamlGeoLocationError(Exception):
pass
class YamlGeoLocationRepository(GeoLocationRepository):
"""
Provide a yaml-based key-location map for gis-identites not available(yet)
"""
def __init__(self, yaml_file_dir):
"""
Parameters
----------
        yaml_file_dir: string
            path to the directory containing the location files, named
            pt_locations-<epsg_id>.yml, e.g.
             pt_locations-epsg_32632.yml (UTM32N) and
             pt_locations-epsg_32633.yml (UTM33N)
"""
self._file_dir = yaml_file_dir
def read_location_dict(self, epsg_id):
full_name = path.join(self._file_dir, _filename_of(epsg_id))
with open(full_name, 'r') as f:
return yaml.load(f)
def get_locations(self, location_id_list, epsg_id=32632):
loc_dict = self.read_location_dict(epsg_id)
locations = {}
for index in location_id_list:
if loc_dict.get(index) is not None:
locations[index] = tuple(loc_dict[index])
else:
raise YamlGeoLocationError("Could not get location of geo point-id!")
return locations
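# Illustrative sketch (assumption, not from the source): a pt_locations-epsg_32633.yml
# file read by read_location_dict() above is expected to be a plain YAML mapping from
# geo point-id to a coordinate sequence (typically x, y, z), e.g.:
#
#   123: [254321.0, 6782345.0, 432.0]
#   456: [261234.0, 6791234.0, 118.0]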
def _filename_of(epsg_id):
return "pt_locations-epsg_{}.yml".format(epsg_id)
| lgpl-3.0 | -733,572,491,415,712,300 | 27.979167 | 85 | 0.608196 | false |
UNINETT/nav | python/nav/portadmin/handlers.py | 1 | 11747 | #
# Copyright (C) 2011-2015, 2020 UNINETT
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Interface definition for PortAdmin management handlers"""
import time
from typing import List, Tuple, Dict, Any, Sequence
import logging
from nav.models import manage
from nav.portadmin.vlan import FantasyVlan
_logger = logging.getLogger(__name__)
class ManagementHandler:
"""Defines a common interface for all types of PortAdmin management handlers.
This defines the set of methods that a handler class may be expected by PortAdmin
to provide, regardless of the underlying management protocol implemented by such
a class.
"""
def __init__(self, netbox: manage.Netbox, **kwargs):
self.netbox = netbox
def set_interface_description(self, interface: manage.Interface, description: str):
"""Configures a single interface's description, AKA the ifalias value"""
raise NotImplementedError
def get_interface_native_vlan(self, interface: manage.Interface) -> int:
"""Retrieves the native/untagged VLAN configured on interface"""
raise NotImplementedError
def get_interfaces(
self, interfaces: Sequence[manage.Interface] = None
) -> List[Dict[str, Any]]:
"""Retrieves running configuration switch ports on the device.
:param interfaces: Optional list of interfaces to filter for, as fetching
data for all interfaces may be a waste of time if only a
single interface is needed. The implementing
handler/protocol may not support this filter, so do not rely
on it.
:returns: A list of dicts with members `name`, `description`, `oper`, `admin`
                  and `vlan` (the latter being the access/untagged/native vlan ID).
"""
raise NotImplementedError
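    # Illustrative sketch of the shape of a single element in the list returned
    # by get_interfaces(). The concrete values are invented for demonstration
    # purposes only:
    #
    #   {"name": "GigabitEthernet1/0/1",
    #    "description": "uplink to dist-sw",
    #    "oper": manage.Interface.OPER_UP,
    #    "admin": 1,  # ifAdminStatus up(1)
    #    "vlan": 42}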
def set_vlan(self, interface, vlan):
"""Set a new vlan on the given interface and remove the previous vlan"""
raise NotImplementedError
def set_native_vlan(self, interface: manage.Interface, vlan: int):
"""Set native vlan on a trunk interface"""
raise NotImplementedError
def set_interface_up(self, interface: manage.Interface):
"""Enables a previously shutdown interface"""
raise NotImplementedError
def set_interface_down(self, interface: manage.Interface):
"""Shuts down/disables an enabled interface"""
raise NotImplementedError
def cycle_interfaces(
self,
interfaces: Sequence[manage.Interface],
wait: float = 5.0,
commit: bool = False,
):
"""Link cycles a set of interfaces, with an optional delay in between.
Mostly used for configuration changes where any client connected to an
interface needs to be notified about a network change. Typically,
if an interface is suddenly placed on a new VLAN, cycling the link status of
the interface will prompt any connected machine to ask for a new DHCP lease,
which may be necessary now that the machine is potentially on a different IP
subnet.
:param interfaces: The list of interfaces to cycle.
:param wait: number of seconds to wait between down and up operations.
        :param commit: If True, issues a config commit when the interfaces have been
disabled, and issues a new commit when they have been enabled
again.
"""
if not interfaces:
return
netbox = set(ifc.netbox for ifc in interfaces)
assert len(netbox) == 1, "Interfaces belong to multiple netboxes"
netbox = list(netbox)[0]
assert netbox == self.netbox, "Interfaces belong to wrong netbox"
to_cycle = self._filter_oper_up_interfaces(interfaces)
if not to_cycle:
_logger.debug("No interfaces to cycle on %s", netbox.sysname)
return
_logger.debug("Taking interfaces administratively down")
for ifc in to_cycle:
self.set_interface_down(ifc)
_logger.debug(ifc.ifname)
if commit:
self.commit_configuration()
if wait:
time.sleep(wait)
_logger.debug("Taking interfaces administratively up again")
for ifc in to_cycle:
self.set_interface_up(ifc)
_logger.debug(ifc.ifname)
if commit:
self.commit_configuration()
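    # Illustrative call-site sketch (the handler instance and interface list
    # are assumptions for demonstration): after moving a set of access ports
    # to a new VLAN, a caller could run
    #
    #   handler.cycle_interfaces(changed_interfaces, wait=5.0, commit=True)
    #
    # so that connected hosts see the link flap and request a fresh DHCP lease.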
def _filter_oper_up_interfaces(
self, interfaces: Sequence[manage.Interface]
) -> List[manage.Interface]:
"""Filters a list of Interface objects, returning only those that are
currently operationally up.
"""
oper_up = set(
ifc["name"]
for ifc in self.get_interfaces(interfaces)
if ifc["oper"] == manage.Interface.OPER_UP
)
to_cycle = [ifc for ifc in interfaces if ifc.ifname in oper_up]
if len(to_cycle) < len(interfaces):
_logger.debug(
"Link cycling on %s: Asked to cycle %r, but only %r is oper up",
self.netbox.sysname,
[ifc.ifname for ifc in interfaces],
[ifc.ifname for ifc in to_cycle],
)
return to_cycle
def commit_configuration(self):
"""Commit running configuration or pending configuration changes to the
device's startup configuration.
This operation has different implications depending on the underlying
platform and management protocol, and may in some instances be a no-op.
This would map more or less one-to-one when using NETCONF and related protocols,
whereas when using SNMP on Cisco, this may consist of a "write mem" operation.
"""
raise NotImplementedError
def get_interface_admin_status(self, interface: manage.Interface) -> int:
"""Query administrative status of an individual interface.
        :returns: An integer to be interpreted as an RFC 2863 ifAdminStatus value, also
defined in `manage.Interface.ADMIN_STATUS_CHOICES`:
> up(1), -- ready to pass packets
> down(2),
> testing(3) -- in some test mode
"""
raise NotImplementedError
def get_netbox_vlans(self) -> List[FantasyVlan]:
"""Returns a list of FantasyVlan objects representing the enabled VLANs on
this netbox.
The FantasyVlan objects represent NAV VLAN objects where a VLAN tag can be
correlated with a NAV VLAN entry, but can also be used to represent VLAN tags
that are unknown to NAV.
"""
raise NotImplementedError
def get_netbox_vlan_tags(self) -> List[int]:
"""Returns a list of enabled VLANs on this netbox.
:returns: A list of VLAN tags (integers)
"""
raise NotImplementedError
def set_interface_voice_vlan(self, interface: manage.Interface, voice_vlan: int):
"""Activates the voice vlan on this interface.
The default implementation is to employ PortAdmin's generic trunk-based voice
VLAN concept. This entails setting the interface to trunk mode, keeping the
untagged VLAN as its native VLAN and trunking/tagging the voice VLAN.
A vendor-specific implementation in an inheriting class may opt to use a more
appropriate vendor-specific implementation (one example is Cisco voice VLAN).
"""
self.set_trunk(interface, interface.vlan, [voice_vlan])
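        # I.e. keep the current untagged VLAN as the native VLAN and tag the
        # voice VLAN on the resulting trunk.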
def get_cisco_voice_vlans(self):
"""Should not be implemented on anything else than Cisco"""
raise NotImplementedError
def set_cisco_voice_vlan(self, interface, voice_vlan):
"""Should not be implemented on anything else than Cisco"""
raise NotImplementedError
def enable_cisco_cdp(self, interface):
"""Should not be implemented on anything else than Cisco"""
raise NotImplementedError
def disable_cisco_voice_vlan(self, interface):
"""Should not be implemented on anything else than Cisco"""
raise NotImplementedError
def disable_cisco_cdp(self, interface):
"""Should not be implemented on anything else than Cisco"""
raise NotImplementedError
def get_native_and_trunked_vlans(self, interface) -> Tuple[int, List[int]]:
"""Get the trunked vlans on this interface
:returns: (native_vlan_tag, list_of_trunked_vlan_tags)
"""
raise NotImplementedError
def set_access(self, interface: manage.Interface, access_vlan: int):
"""Puts a port in access mode and sets its access/native/untagged VLAN.
An implementation must also update the Interface object in the NAVdb.
"""
raise NotImplementedError
def set_trunk(
self, interface: manage.Interface, native_vlan: int, trunk_vlans: Sequence[int]
):
"""Puts a port in trunk mode, setting its native/untagged VLAN and tagged
trunk VLANs as well.
An implementation must also update the Interface object in the NAVdb.
:param interface: The interface to set to trunk mode.
:param native_vlan: The native VLAN for untagged packets on this interface.
:param trunk_vlans: A list of VLAN tags to allow on this trunk.
"""
raise NotImplementedError
def is_dot1x_enabled(self, interface: manage.Interface) -> bool:
"""Returns True if 802.1X authentication is is enabled on interface"""
raise NotImplementedError
def get_dot1x_enabled_interfaces(self) -> Dict[str, bool]:
"""Fetches the 802.1X enabled state of every interface.
:returns: A dict mapping each interface name to a "802.1X enabled" value
"""
raise NotImplementedError
def is_port_access_control_enabled(self) -> bool:
"""Returns True if port access control is enabled on this netbox"""
raise NotImplementedError
def raise_if_not_configurable(self):
"""Raises an exception if this netbox cannot be configured through PortAdmin.
The exception message will contain a human-readable explanation as to why not.
"""
raise NotImplementedError
def is_configurable(self) -> bool:
"""Returns True if this netbox is configurable using this handler"""
try:
self.raise_if_not_configurable()
except Exception:
return False
return True
class ManagementError(Exception):
"""Base exception class for device management errors"""
class DeviceNotConfigurableError(ManagementError):
"""Raised when a device is not configurable by PortAdmin for some reason"""
class NoResponseError(ManagementError):
"""Raised whenever there is no response when talking to the remote device"""
class AuthenticationError(ManagementError):
"""Raised where the remote device indicated the wrong credentials were used"""
class ProtocolError(ManagementError):
"""Raised when some non-categorized error in the underlying protocol occurred
during communication
"""
| gpl-2.0 | 7,145,446,863,028,580,000 | 38.026578 | 88 | 0.660935 | false |
demonchild2112/travis-test | grr/server/grr_response_server/databases/db_clients_test.py | 1 | 71030 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.builtins import range
from future.utils import iteritems
from future.utils import iterkeys
import mock
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.util import collection
from grr_response_server import flow
from grr_response_server.databases import db
from grr_response_server.databases import db_test_utils
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
CERT = rdf_crypto.RDFX509Cert(b"""-----BEGIN CERTIFICATE-----
MIIF7zCCA9egAwIBAgIBATANBgkqhkiG9w0BAQUFADA+MQswCQYDVQQGEwJVUzEM
MAoGA1UECBMDQ0FMMQswCQYDVQQHEwJTRjEUMBIGA1UEAxMLR1JSIFRlc3QgQ0Ew
HhcNMTEwNTI3MTIxNTExWhcNMTIwNTI2MTIxNTExWjBCMQswCQYDVQQGEwJVUzEM
MAoGA1UECBMDQ0FMMQswCQYDVQQHEwJTRjEYMBYGA1UEAxMPR1JSIFRlc3QgU2Vy
dmVyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAwUXBNzWSoEr88dGQ
qZWSwgJ+n/A/QQyNn/ZM57XsqI6IMO6plFmA+DZv2FkTTdniNPmhuL9mjWYA5yg4
KYMbz5igOiBoF9RBeIm2/v2Sg65VFoyCgJNgl3V34mpoDCHBYTi2A/OfoKeSQISb
UfMHsYhPHdGfhjk8dEuMo7MxjrtfAO3Y4QtjTiE07eNdoRQkFtzF0m9oSaytJ95c
BAe1eQ/2zcvxPvnF5yavR4fwKQtk8o1hc21XVG0JvqJ7da79C27cQQP3E/6EYzpN
pkh9n4berPBHV/oxlB2np4zKgXCQ4zDdiw1uEUY9+iFmVEuvzO2e5NJcfnu74sGb
oX+2a2/ph65sMZ2/NF8lRgetvIrtYUl15yypXmH3VobBYvpfGpab1rLt0J1HoVUh
V5Nsrdav0n8EQ+hln/sHz+G5rNe4ZSJbZ8w8b1TOwTENdzOYKAQH/NN9IrsbXNgE
8RHSHfPwibWnhfKS/fy7GO8qah/u2HPQ5S33gao409zbwS6c4sn0nAQhr5H6pHVD
iMLcBPFQ+w6zIk28hOv3GMa5XQtm8ONb/QhOLTbtB+ZCHKCw3bXASVDt7EwvnM/b
cSYS58wKmUQhH3unizXyihLhxC8ck/KMTkGnuGBC0Pz2d6YgcdL4BxAK6udSjSQQ
DB8sWYKJJrmlCnaN2E1eBbPV5PMCAwEAAaOB8zCB8DAJBgNVHRMEAjAAMBEGCWCG
SAGG+EIBAQQEAwIGQDArBglghkgBhvhCAQ0EHhYcVGlueUNBIEdlbmVyYXRlZCBD
ZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUywgOS64OISRSFNqpMpF83qXKDPIwbgYDVR0j
BGcwZYAUO4+Xefeqvq3W6/eaPxaNv8IHpcuhQqRAMD4xCzAJBgNVBAYTAlVTMQww
CgYDVQQIEwNDQUwxCzAJBgNVBAcTAlNGMRQwEgYDVQQDEwtHUlIgVGVzdCBDQYIJ
AIayxnA7Bp+3MAkGA1UdEgQCMAAwCQYDVR0RBAIwADANBgkqhkiG9w0BAQUFAAOC
AgEAY6z2VZdS83i6N88hVk3Y8qt0xNhP10+tfgsI7auPq2n3PsDNOLPvp2OcUcLI
csMQ/3GTI84uRm0GFnLMAc+A8BQZ14+3kPRju5jWe3KMfP1Ohz5Hm36Uf47tFhgV
VYnyIPwwCE1QPOgbnFt5jR+d3pjhx9TvjfeFKmavxMpxnDD2KWgGZfuE1UqC0DXm
rkimG2Q+dHUFBOMBUKzaklZsr7v4hlc+7XY1n5vRhiuczS9m5mVB05Cg4mrJFcVs
AUsxSuwgMhJqxuNaFw8qMmdkX7ujo5HAtwJqIi91Sdj8xNRqDysd1OagqL3Mx172
wTJu7ZIAURpw52AXxn3PpK5NS3NSvL/PE6SnpHCtfkxaHl/80W2oq7MjSaHbQt2g
8vYuwLEKYVhgEBzEK0p5AqDyabAn49bw9hfT10NElJ/tYEPCKZZwrARBHnpCxLeC
jJVIIMzPOczWnTDw92ls3l6+l075MOzXGo94GNlxt0/HLCQktl9cuF1APmRkiGUe
EaQA1dggxMyZGyZpYmEbrWCiEjKqfIXXnpyw5pxL5Rvoe4kYrQBvbJ1aaWJ87Pcz
gXJvjIkzp4x/MMAgdBOqJm5tJ4nhCHTbXWuIbYymPLn7hqXhyrDZwqnH7kQKPF2/
z5KjO8gWio6YOhsDwrketcBcIANMDYws2+TzrLs9ttuHNS0=
-----END CERTIFICATE-----""")
def _DaysSinceEpoch(days):
return rdfvalue.RDFDatetime(
rdfvalue.Duration.From(days, rdfvalue.DAYS).microseconds)
def _FlattenDicts(dicts):
"""Merges an iterable of dicts into one dict."""
result = {}
for dict_obj in dicts:
result.update(dict_obj)
return result
class DatabaseTestClientsMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of client data.
"""
def testClientWriteToUnknownClient(self):
d = self.db
client_id = "C.fc413187fefa1dcf"
with self.assertRaises(db.UnknownClientError) as context:
d.WriteClientSnapshot(rdf_objects.ClientSnapshot(client_id=client_id))
self.assertEqual(context.exception.client_id, client_id)
def testKeywordWriteToUnknownClient(self):
d = self.db
client_id = "C.fc413187fefa1dcf"
with self.assertRaises(db.UnknownClientError) as context:
d.AddClientKeywords(client_id, ["keyword"])
self.assertEqual(context.exception.client_id, client_id)
d.RemoveClientKeyword(client_id, "test")
def testLabelWriteToUnknownClient(self):
d = self.db
client_id = "C.fc413187fefa1dcf"
with self.assertRaises(db.UnknownClientError) as context:
d.AddClientLabels(client_id, "testowner", ["label"])
self.assertEqual(context.exception.client_id, client_id)
d.RemoveClientLabels(client_id, "testowner", ["label"])
def testAddRemoveClientLabelsWorkWithTuplesAsArgument(self):
# See https://github.com/google/grr/issues/716 for an additional context.
# AddClientlabels/ReadClientLabels require "labels" argument to be
# iterable. DB implementation has to respect this assumption.
d = self.db
client_id = "C.fc413187fefa1dcf"
with self.assertRaises(db.UnknownClientError) as context:
d.AddClientLabels(client_id, "testowner", ("label",))
self.assertEqual(context.exception.client_id, client_id)
d.RemoveClientLabels(client_id, "testowner", ("label",))
def testClientMetadataInitialWrite(self):
d = self.db
client_id_1 = "C.fc413187fefa1dcf"
# Typical initial FS enabled write
d.WriteClientMetadata(client_id_1, fleetspeak_enabled=True)
client_id_2 = "C.00413187fefa1dcf"
# Typical initial non-FS write
d.WriteClientMetadata(
client_id_2,
certificate=CERT,
first_seen=rdfvalue.RDFDatetime(100000000),
fleetspeak_enabled=False)
res = d.MultiReadClientMetadata([client_id_1, client_id_2])
self.assertLen(res, 2)
m1 = res[client_id_1]
self.assertIsInstance(m1, rdf_objects.ClientMetadata)
self.assertTrue(m1.fleetspeak_enabled)
m2 = res[client_id_2]
self.assertIsInstance(m2, rdf_objects.ClientMetadata)
self.assertFalse(m2.fleetspeak_enabled)
self.assertEqual(m2.certificate, CERT)
self.assertEqual(m2.first_seen, rdfvalue.RDFDatetime(100000000))
def testClientMetadataSubsecond(self):
client_id = "C.fc413187fefa1dcf"
self.db.WriteClientMetadata(
client_id,
certificate=CERT,
first_seen=rdfvalue.RDFDatetime(100000001),
last_clock=rdfvalue.RDFDatetime(100000011),
last_foreman=rdfvalue.RDFDatetime(100000021),
last_ping=rdfvalue.RDFDatetime(100000031),
fleetspeak_enabled=False)
res = self.db.MultiReadClientMetadata([client_id])
self.assertLen(res, 1)
m1 = res[client_id]
self.assertEqual(m1.first_seen, rdfvalue.RDFDatetime(100000001))
self.assertEqual(m1.clock, rdfvalue.RDFDatetime(100000011))
self.assertEqual(m1.last_foreman_time, rdfvalue.RDFDatetime(100000021))
self.assertEqual(m1.ping, rdfvalue.RDFDatetime(100000031))
def testClientMetadataPing(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
# Typical update on client ping.
d.WriteClientMetadata(
client_id,
last_ping=rdfvalue.RDFDatetime(200000000000),
last_clock=rdfvalue.RDFDatetime(210000000000),
last_ip=rdf_client_network.NetworkAddress(
human_readable_address="8.8.8.8"),
last_foreman=rdfvalue.RDFDatetime(220000000000))
res = d.MultiReadClientMetadata([client_id])
self.assertLen(res, 1)
m1 = res[client_id]
self.assertIsInstance(m1, rdf_objects.ClientMetadata)
self.assertTrue(m1.fleetspeak_enabled)
self.assertEqual(m1.ping, rdfvalue.RDFDatetime(200000000000))
self.assertEqual(m1.clock, rdfvalue.RDFDatetime(210000000000))
self.assertEqual(
m1.ip,
rdf_client_network.NetworkAddress(human_readable_address="8.8.8.8"))
self.assertEqual(m1.last_foreman_time, rdfvalue.RDFDatetime(220000000000))
def testClientMetadataValidatesIP(self):
d = self.db
client_id = "C.fc413187fefa1dcf"
with self.assertRaises(TypeError):
d.WriteClientMetadata(
client_id, fleetspeak_enabled=True, last_ip="127.0.0.1")
def testReadAllClientIDsEmpty(self):
result = list(self.db.ReadAllClientIDs())
self.assertEmpty(result)
def testReadAllClientIDsSome(self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
client_c_id = db_test_utils.InitializeClient(self.db)
client_ids = list(self.db.ReadAllClientIDs())
self.assertLen(client_ids, 1)
self.assertCountEqual(client_ids[0],
[client_a_id, client_b_id, client_c_id])
def testReadAllClientIDsNotEvenlyDivisibleByBatchSize(self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
client_c_id = db_test_utils.InitializeClient(self.db)
client_ids = list(self.db.ReadAllClientIDs(batch_size=2))
self.assertEqual([len(batch) for batch in client_ids], [2, 1])
self.assertCountEqual(
collection.Flatten(client_ids), [client_a_id, client_b_id, client_c_id])
def testReadAllClientIDsEvenlyDivisibleByBatchSize(self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
client_c_id = db_test_utils.InitializeClient(self.db)
client_d_id = db_test_utils.InitializeClient(self.db)
client_ids = list(self.db.ReadAllClientIDs(batch_size=2))
self.assertEqual([len(batch) for batch in client_ids], [2, 2])
self.assertCountEqual(
collection.Flatten(client_ids),
[client_a_id, client_b_id, client_c_id, client_d_id])
def testReadAllClientIDsFilterLastPing(self):
self.db.WriteClientMetadata("C.0000000000000001", fleetspeak_enabled=True)
self.db.WriteClientMetadata(
"C.0000000000000002",
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2))
self.db.WriteClientMetadata(
"C.0000000000000003",
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3))
self.db.WriteClientMetadata(
"C.0000000000000004",
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4))
client_ids = self.db.ReadAllClientIDs(
min_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3))
self.assertCountEqual(
collection.Flatten(client_ids),
["C.0000000000000003", "C.0000000000000004"])
def testReadClientLastPings_ResultsDivisibleByBatchSize(self):
client_ids = self._WriteClientLastPingData()
(client_id5, client_id6, client_id7, client_id8, client_id9,
client_id10) = client_ids[4:]
results = list(
self.db.ReadClientLastPings(
min_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
batch_size=3))
self.assertEqual([len(batch) for batch in results], [3, 3])
self.assertEqual(
_FlattenDicts(results), {
client_id5: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id6: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id7: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id8: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id9: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
client_id10: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
})
def testReadClientLastPings_ResultsNotDivisibleByBatchSize(self):
client_ids = self._WriteClientLastPingData()
(client_id5, client_id6, client_id7, client_id8, client_id9,
client_id10) = client_ids[4:]
results = list(
self.db.ReadClientLastPings(
min_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
batch_size=4))
self.assertEqual([len(batch) for batch in results], [4, 2])
self.assertEqual(
_FlattenDicts(results), {
client_id5: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id6: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id7: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id8: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id9: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
client_id10: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
})
def testReadClientLastPings_NoFilter(self):
client_ids = self._WriteClientLastPingData()
(client_id1, client_id2, client_id3, client_id4, client_id5, client_id6,
client_id7, client_id8, client_id9, client_id10) = client_ids
self.assertEqual(
list(self.db.ReadClientLastPings()), [{
client_id1: None,
client_id2: None,
client_id3: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2),
client_id4: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2),
client_id5: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id6: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id7: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id8: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id9: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
client_id10: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
}])
def testReadClientLastPings_AllFiltersFleetspeak(self):
client_ids = self._WriteClientLastPingData()
client_id6 = client_ids[5]
client_id8 = client_ids[7]
actual_data = self.db.ReadClientLastPings(
min_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
max_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
fleetspeak_enabled=True)
expected_data = [{
client_id6: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id8: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
}]
self.assertEqual(list(actual_data), expected_data)
def testReadClientLastPings_AllFiltersNoFleetspeak(self):
client_ids = self._WriteClientLastPingData()
client_id5 = client_ids[4]
client_id7 = client_ids[6]
actual_data = self.db.ReadClientLastPings(
min_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
max_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
fleetspeak_enabled=False)
expected_data = [{
client_id5: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id7: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
}]
self.assertEqual(list(actual_data), expected_data)
def testReadClientLastPings_MinPingFleetspeakFilters(self):
client_ids = self._WriteClientLastPingData()
client_id5 = client_ids[4]
client_id7 = client_ids[6]
client_id9 = client_ids[8]
actual_data = self.db.ReadClientLastPings(
min_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
fleetspeak_enabled=False)
expected_data = [{
client_id5: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
client_id7: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
client_id9: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
}]
self.assertEqual(list(actual_data), expected_data)
def testReadClientLastPings_MaxPingFleetspeakFilters(self):
client_ids = self._WriteClientLastPingData()
client_id2 = client_ids[1]
client_id4 = client_ids[3]
client_id6 = client_ids[5]
actual_data = self.db.ReadClientLastPings(
max_last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
fleetspeak_enabled=True)
expected_data = [{
client_id2: None,
client_id4: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2),
client_id6: rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
}]
self.assertEqual(list(actual_data), expected_data)
def _WriteClientLastPingData(self):
"""Writes test data for ReadClientLastPings() tests."""
client_ids = tuple("C.00000000000000%02d" % i for i in range(1, 11))
(client_id1, client_id2, client_id3, client_id4, client_id5, client_id6,
client_id7, client_id8, client_id9, client_id10) = client_ids
self.db.WriteClientMetadata(client_id1, fleetspeak_enabled=False)
self.db.WriteClientMetadata(client_id2, fleetspeak_enabled=True)
self.db.WriteClientMetadata(
client_id3, last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2))
self.db.WriteClientMetadata(
client_id4,
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2),
fleetspeak_enabled=True)
self.db.WriteClientMetadata(
client_id5, last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3))
self.db.WriteClientMetadata(
client_id6,
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3),
fleetspeak_enabled=True)
self.db.WriteClientMetadata(
client_id7, last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4))
self.db.WriteClientMetadata(
client_id8,
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4),
fleetspeak_enabled=True)
self.db.WriteClientMetadata(
client_id9, last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5))
self.db.WriteClientMetadata(
client_id10,
last_ping=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5),
fleetspeak_enabled=True)
return client_ids
def _SetUpReadClientSnapshotHistoryTest(self):
d = self.db
self.client_id = db_test_utils.InitializeClient(self.db)
timestamps = [rdfvalue.RDFDatetime.Now()]
client = rdf_objects.ClientSnapshot(client_id=self.client_id, kernel="12.3")
client.knowledge_base.fqdn = "test1234.examples.com"
d.WriteClientSnapshot(client)
timestamps.append(d.ReadClientSnapshot(self.client_id).timestamp)
timestamps.append(rdfvalue.RDFDatetime.Now())
client.kernel = "12.4"
d.WriteClientSnapshot(client)
timestamps.append(d.ReadClientSnapshot(self.client_id).timestamp)
timestamps.append(rdfvalue.RDFDatetime.Now())
return timestamps
def testReadClientSnapshotHistory(self):
d = self.db
self._SetUpReadClientSnapshotHistoryTest()
hist = d.ReadClientSnapshotHistory(self.client_id)
self.assertLen(hist, 2)
self.assertIsInstance(hist[0], rdf_objects.ClientSnapshot)
self.assertIsInstance(hist[1], rdf_objects.ClientSnapshot)
self.assertGreater(hist[0].timestamp, hist[1].timestamp)
self.assertIsInstance(hist[0].timestamp, rdfvalue.RDFDatetime)
self.assertEqual(hist[0].kernel, "12.4")
self.assertEqual(hist[1].kernel, "12.3")
def testReadClientSnapshotHistoryWithEmptyTimerange(self):
d = self.db
self._SetUpReadClientSnapshotHistoryTest()
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(None, None))
self.assertLen(hist, 2)
self.assertEqual(hist[0].kernel, "12.4")
self.assertEqual(hist[1].kernel, "12.3")
def testReadClientSnapshotHistoryWithTimerangeWithBothFromTo(self):
d = self.db
ts = self._SetUpReadClientSnapshotHistoryTest()
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[0], ts[2]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].kernel, "12.3")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[2], ts[4]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].kernel, "12.4")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[0], ts[4]))
self.assertLen(hist, 2)
self.assertEqual(hist[0].kernel, "12.4")
self.assertEqual(hist[1].kernel, "12.3")
def testReadClientSnapshotHistoryWithTimerangeWithFromOnly(self):
d = self.db
ts = self._SetUpReadClientSnapshotHistoryTest()
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[0], None))
self.assertLen(hist, 2)
self.assertEqual(hist[0].kernel, "12.4")
self.assertEqual(hist[1].kernel, "12.3")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[2], None))
self.assertLen(hist, 1)
self.assertEqual(hist[0].kernel, "12.4")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[4], None))
self.assertEmpty(hist)
def testReadClientSnapshotHistoryWithTimerangeWithToOnly(self):
d = self.db
ts = self._SetUpReadClientSnapshotHistoryTest()
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(None, ts[0]))
self.assertEmpty(hist)
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(None, ts[2]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].kernel, "12.3")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(None, ts[4]))
self.assertLen(hist, 2)
self.assertEqual(hist[0].kernel, "12.4")
self.assertEqual(hist[1].kernel, "12.3")
def testReadClientSnapshotHistoryWithTimerangeEdgeCases(self):
# Timerange should work as [from, to]. I.e. "from" is inclusive and "to"
# is inclusive.
d = self.db
ts = self._SetUpReadClientSnapshotHistoryTest()
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[1], ts[1]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].kernel, "12.3")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[1], ts[2]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].kernel, "12.3")
hist = d.ReadClientSnapshotHistory(self.client_id, timerange=(ts[1], ts[3]))
self.assertLen(hist, 2)
self.assertEqual(hist[0].kernel, "12.4")
self.assertEqual(hist[1].kernel, "12.3")
def testWriteClientSnapshotHistory(self):
client_id = db_test_utils.InitializeClient(self.db)
client_a = rdf_objects.ClientSnapshot(client_id=client_id)
client_a.kernel = "1.2.3"
client_a.startup_info.client_info.client_version = 42
client_a.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
client_b = rdf_objects.ClientSnapshot(client_id=client_id)
client_b.kernel = "4.5.6"
client_b.startup_info.client_info.client_version = 108
client_b.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-02-01")
client_c = rdf_objects.ClientSnapshot(client_id=client_id)
client_c.kernel = "7.8.9"
client_c.startup_info.client_info.client_version = 707
client_c.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-03-01")
self.db.WriteClientSnapshotHistory([client_a, client_b, client_c])
    # Check whether the client snapshot history has been recorded correctly.
history = self.db.ReadClientSnapshotHistory(client_id)
self.assertLen(history, 3)
self.assertEqual(history[0].kernel, "7.8.9")
self.assertEqual(history[0].startup_info.client_info.client_version, 707)
self.assertEqual(history[0].timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-03-01"))
self.assertEqual(history[1].kernel, "4.5.6")
self.assertEqual(history[1].startup_info.client_info.client_version, 108)
self.assertEqual(history[1].timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-02-01"))
self.assertEqual(history[2].kernel, "1.2.3")
self.assertEqual(history[2].startup_info.client_info.client_version, 42)
self.assertEqual(history[2].timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01"))
    # Check whether the startup info history has been recorded correctly.
history = self.db.ReadClientStartupInfoHistory(client_id)
self.assertLen(history, 3)
self.assertEqual(history[0].client_info.client_version, 707)
self.assertEqual(history[0].timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-03-01"))
self.assertEqual(history[1].client_info.client_version, 108)
self.assertEqual(history[1].timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-02-01"))
self.assertEqual(history[2].client_info.client_version, 42)
self.assertEqual(history[2].timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01"))
def testWriteClientSnapshotHistoryUpdatesLastTimestampIfNotSet(self):
client_id = db_test_utils.InitializeClient(self.db)
client_new = rdf_objects.ClientSnapshot(client_id=client_id)
client_new.kernel = "1.0.0"
client_new.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
self.db.WriteClientSnapshotHistory([client_new])
info = self.db.ReadClientFullInfo(client_id)
self.assertEqual(info.last_snapshot.kernel, "1.0.0")
self.assertEqual(info.last_snapshot.timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01"))
self.assertEqual(info.last_startup_info.timestamp,
rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01"))
def testWriteClientSnapshotHistoryUpdatesLastTimestampIfNewer(self):
client_id = db_test_utils.InitializeClient(self.db)
client_old = rdf_objects.ClientSnapshot(client_id=client_id)
client_old.kernel = "1.0.0"
self.db.WriteClientSnapshot(client_old)
old_timestamp = self.db.ReadClientSnapshot(client_id).timestamp
client_new = rdf_objects.ClientSnapshot(client_id=client_id)
client_new.kernel = "2.0.0"
client_new.timestamp = rdfvalue.RDFDatetime.Now()
self.db.WriteClientSnapshotHistory([client_new])
info = self.db.ReadClientFullInfo(client_id)
self.assertEqual(info.last_snapshot.kernel, "2.0.0")
self.assertGreater(info.last_snapshot.timestamp, old_timestamp)
self.assertGreater(info.last_startup_info.timestamp, old_timestamp)
def testWriteClientSnapshotHistoryDoesNotUpdateLastTimestampIfOlder(self):
client_id = db_test_utils.InitializeClient(self.db)
client_new = rdf_objects.ClientSnapshot(client_id=client_id)
client_new.kernel = "2.0.0"
self.db.WriteClientSnapshot(client_new)
new_timestamp = self.db.ReadClientSnapshot(client_id).timestamp
client_old = rdf_objects.ClientSnapshot(client_id=client_id)
client_old.kernel = "1.0.0"
client_old.timestamp = new_timestamp - rdfvalue.Duration.From(
1, rdfvalue.DAYS)
self.db.WriteClientSnapshotHistory([client_old])
info = self.db.ReadClientFullInfo(client_id)
self.assertEqual(info.last_snapshot.kernel, "2.0.0")
self.assertEqual(info.last_snapshot.timestamp, new_timestamp)
self.assertEqual(info.last_startup_info.timestamp, new_timestamp)
def testWriteClientSnapshotHistoryUpdatesOnlyLastClientTimestamp(self):
client_id = db_test_utils.InitializeClient(self.db)
client_old = rdf_objects.ClientSnapshot(client_id=client_id)
client_old.kernel = "1.0.0"
client_old.startup_info.client_info.client_name = "foo"
self.db.WriteClientSnapshot(client_old)
old_timestamp = self.db.ReadClientSnapshot(client_id).timestamp
startup_info = rdf_client.StartupInfo()
startup_info.client_info.client_name = "bar"
self.db.WriteClientStartupInfo(client_id, startup_info)
startup_timestamp = self.db.ReadClientStartupInfo(client_id).timestamp
client_new = rdf_objects.ClientSnapshot(client_id=client_id)
client_new.kernel = "2.0.0"
client_new.startup_info.client_info.client_name = "baz"
client_new.timestamp = rdfvalue.RDFDatetime.Lerp(
0.5, start_time=old_timestamp, end_time=startup_timestamp)
self.db.WriteClientSnapshotHistory([client_new])
info = self.db.ReadClientFullInfo(client_id)
last_snapshot = info.last_snapshot
last_startup_info = info.last_startup_info
self.assertEqual(last_snapshot.kernel, "2.0.0")
self.assertEqual(last_snapshot.startup_info.client_info.client_name, "baz")
self.assertEqual(last_snapshot.timestamp, client_new.timestamp)
self.assertEqual(last_startup_info.client_info.client_name, "bar")
self.assertEqual(last_startup_info.timestamp, startup_timestamp)
def testWriteClientSnapshotHistoryRaiseTypeError(self):
client = rdf_objects.ClientMetadata()
client.os_version = "16.04"
client.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-04-10")
with self.assertRaisesRegex(TypeError, "Expected"):
self.db.WriteClientSnapshotHistory([client])
def testWriteClientSnapshotHistoryRaiseValueErrorOnEmpty(self):
with self.assertRaisesRegex(ValueError, "empty"):
self.db.WriteClientSnapshotHistory([])
def testWriteClientSnapshotHistoryRaiseValueErrorOnNonUniformIds(self):
client_id_a = db_test_utils.InitializeClient(self.db)
client_id_b = db_test_utils.InitializeClient(self.db)
client_a = rdf_objects.ClientSnapshot(client_id=client_id_a)
client_a.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-05-12")
client_b = rdf_objects.ClientSnapshot(client_id=client_id_b)
client_b.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2010-06-12")
with self.assertRaisesRegex(ValueError, "client id"):
self.db.WriteClientSnapshotHistory([client_a, client_b])
def testWriteClientSnapshotHistoryRaiseAttributeError(self):
client_id = db_test_utils.InitializeClient(self.db)
client = rdf_objects.ClientSnapshot(client_id=client_id)
client.kernel = "1.2.3"
client.startup_info.client_info.client_version = 42
with self.assertRaisesRegex(AttributeError, "timestamp"):
self.db.WriteClientSnapshotHistory([client])
def testWriteClientSnapshotHistoryRaiseOnNonExistingClient(self):
client_id = "C.0000000000000000"
client = rdf_objects.ClientSnapshot(client_id=client_id)
client.kernel = "1.2.3"
client.timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2001-01-01")
with self.assertRaises(db.UnknownClientError) as context:
self.db.WriteClientSnapshotHistory([client])
self.assertEqual(context.exception.client_id, client_id)
def testClientStartupInfo(self):
"""StartupInfo is written to a separate table, make sure the merge works."""
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
client = rdf_objects.ClientSnapshot(client_id=client_id, kernel="12.3")
client.startup_info = rdf_client.StartupInfo(boot_time=123)
client.knowledge_base.fqdn = "test1234.examples.com"
d.WriteClientSnapshot(client)
client = d.ReadClientSnapshot(client_id)
self.assertEqual(client.startup_info.boot_time, 123)
client.kernel = "12.4"
client.startup_info = rdf_client.StartupInfo(boot_time=124)
d.WriteClientSnapshot(client)
client.kernel = "12.5"
client.startup_info = rdf_client.StartupInfo(boot_time=125)
d.WriteClientSnapshot(client)
hist = d.ReadClientSnapshotHistory(client_id)
self.assertLen(hist, 3)
startup_infos = [cl.startup_info for cl in hist]
self.assertEqual([si.boot_time for si in startup_infos], [125, 124, 123])
    # StartupInfos written using WriteClientSnapshot show up in the StartupInfoHistory.
history = d.ReadClientStartupInfoHistory(client_id)
self.assertLen(history, 3)
self.assertEqual(startup_infos, history)
def testClientSummary(self):
d = self.db
client_id_1 = db_test_utils.InitializeClient(self.db)
client_id_2 = db_test_utils.InitializeClient(self.db)
client_id_3 = db_test_utils.InitializeClient(self.db)
d.WriteClientSnapshot(
rdf_objects.ClientSnapshot(
client_id=client_id_1,
knowledge_base=rdf_client.KnowledgeBase(
fqdn="test1234.examples.com"),
kernel="12.3"))
d.WriteClientSnapshot(
rdf_objects.ClientSnapshot(
client_id=client_id_1,
knowledge_base=rdf_client.KnowledgeBase(
fqdn="test1234.examples.com"),
kernel="12.4"))
d.WriteClientSnapshot(
rdf_objects.ClientSnapshot(
client_id=client_id_2,
knowledge_base=rdf_client.KnowledgeBase(
fqdn="test1235.examples.com"),
kernel="12.4"))
hist = d.ReadClientSnapshotHistory(client_id_1)
self.assertLen(hist, 2)
    # client_3 has no snapshot yet - its entry should be empty
res = d.MultiReadClientSnapshot([client_id_1, client_id_2, client_id_3])
self.assertLen(res, 3)
self.assertIsInstance(res[client_id_1], rdf_objects.ClientSnapshot)
self.assertIsInstance(res[client_id_2], rdf_objects.ClientSnapshot)
self.assertIsInstance(res[client_id_1].timestamp, rdfvalue.RDFDatetime)
self.assertIsNotNone(res[client_id_2].timestamp)
self.assertEqual(res[client_id_1].knowledge_base.fqdn,
"test1234.examples.com")
self.assertEqual(res[client_id_1].kernel, "12.4")
self.assertEqual(res[client_id_2].knowledge_base.fqdn,
"test1235.examples.com")
self.assertFalse(res[client_id_3])
def testClientValidates(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(TypeError):
d.WriteClientSnapshot(client_id)
def testClientKeywords(self):
d = self.db
client_id_1 = db_test_utils.InitializeClient(self.db)
client_id_2 = db_test_utils.InitializeClient(self.db)
client_id_3 = db_test_utils.InitializeClient(self.db)
# Typical keywords are usernames and prefixes of hostnames.
d.AddClientKeywords(client_id_1, [
"joe", "machine.test.example1.com", "machine.test.example1",
"machine.test", "machine", "🚀"
])
d.AddClientKeywords(client_id_2, [
"fred", "machine.test.example2.com", "machine.test.example2",
"machine.test", "machine", "🚀🚀"
])
d.AddClientKeywords(client_id_3, ["foo", "bar", "baz"])
res = d.ListClientsForKeywords(["fred", "machine", "missing"])
self.assertEqual(res["fred"], [client_id_2])
self.assertCountEqual(res["machine"], [client_id_1, client_id_2])
self.assertEqual(res["missing"], [])
for kw, client_id in [("🚀", client_id_1), ("🚀🚀", client_id_2)]:
res = d.ListClientsForKeywords([kw])
self.assertEqual(
res[kw], [client_id],
"Expected [%s] when reading keyword %s, got %s" %
(client_id, kw, res[kw]))
def testClientKeywordsTimeRanges(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
d.AddClientKeywords(client_id, ["hostname1"])
change_time = rdfvalue.RDFDatetime.Now()
d.AddClientKeywords(client_id, ["hostname2"])
res = d.ListClientsForKeywords(["hostname1", "hostname2"],
start_time=change_time)
self.assertEqual(res["hostname1"], [])
self.assertEqual(res["hostname2"], [client_id])
def testRemoveClientKeyword(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
temporary_kw = "investigation42"
d.AddClientKeywords(client_id, [
"joe", "machine.test.example.com", "machine.test.example",
"machine.test", temporary_kw
])
self.assertEqual(
d.ListClientsForKeywords([temporary_kw])[temporary_kw], [client_id])
d.RemoveClientKeyword(client_id, temporary_kw)
self.assertEqual(d.ListClientsForKeywords([temporary_kw])[temporary_kw], [])
self.assertEqual(d.ListClientsForKeywords(["joe"])["joe"], [client_id])
def testClientLabels(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
self.assertEqual(d.ReadClientLabels(client_id), [])
d.AddClientLabels(client_id, "owner1", ["label1🚀"])
d.AddClientLabels(client_id, "owner2", ["label2", "label🚀3"])
all_labels = [
rdf_objects.ClientLabel(name="label1🚀", owner="owner1"),
rdf_objects.ClientLabel(name="label2", owner="owner2"),
rdf_objects.ClientLabel(name="label🚀3", owner="owner2")
]
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
self.assertEqual(d.ReadClientLabels("C.0000000000000002"), [])
# Can't hurt to insert this one again.
d.AddClientLabels(client_id, "owner1", ["label1🚀"])
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
d.RemoveClientLabels(client_id, "owner1", ["does not exist"])
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
# Label3 is actually owned by owner2.
d.RemoveClientLabels(client_id, "owner1", ["label🚀3"])
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
d.RemoveClientLabels(client_id, "owner2", ["label🚀3"])
self.assertEqual(
d.ReadClientLabels(client_id), [
rdf_objects.ClientLabel(name="label1🚀", owner="owner1"),
rdf_objects.ClientLabel(name="label2", owner="owner2"),
])
def testClientLabelsUnicode(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
self.assertEqual(d.ReadClientLabels(client_id), [])
d.AddClientLabels(client_id, "owner1", ["🚀🍰1"])
d.AddClientLabels(client_id, "owner2", ["🚀🍰2"])
d.AddClientLabels(client_id, "owner2", ["🚀🍰3"])
all_labels = [
rdf_objects.ClientLabel(name="🚀🍰1", owner="owner1"),
rdf_objects.ClientLabel(name="🚀🍰2", owner="owner2"),
rdf_objects.ClientLabel(name="🚀🍰3", owner="owner2")
]
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
d.RemoveClientLabels(client_id, "owner1", ["does not exist"])
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
# This label is actually owned by owner2.
d.RemoveClientLabels(client_id, "owner1", ["🚀🍰3"])
self.assertEqual(d.ReadClientLabels(client_id), all_labels)
d.RemoveClientLabels(client_id, "owner2", ["🚀🍰3"])
self.assertEqual(
d.ReadClientLabels(client_id), [
rdf_objects.ClientLabel(name="🚀🍰1", owner="owner1"),
rdf_objects.ClientLabel(name="🚀🍰2", owner="owner2")
])
def testLongClientLabelCanBeSaved(self):
label = "x" + "🚀" * (db.MAX_LABEL_LENGTH - 2) + "x"
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
d.AddClientLabels(client_id, "owner1", [label])
self.assertEqual(
d.ReadClientLabels(client_id), [
rdf_objects.ClientLabel(name=label, owner="owner1"),
])
def testTooLongClientLabelRaises(self):
label = "a" * (db.MAX_LABEL_LENGTH + 1)
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(ValueError):
d.AddClientLabels(client_id, "owner1", [label])
def testReadAllLabelsReturnsLabelsFromSingleClient(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
d.AddClientLabels(client_id, "owner1🚀", ["foo🚀"])
all_labels = d.ReadAllClientLabels()
self.assertEqual(all_labels,
[rdf_objects.ClientLabel(name="foo🚀", owner="owner1🚀")])
def testReadAllLabelsReturnsLabelsFromMultipleClients(self):
d = self.db
client_id_1 = db_test_utils.InitializeClient(self.db)
client_id_2 = db_test_utils.InitializeClient(self.db)
d.AddClientLabels(client_id_1, "owner1", ["foo"])
d.AddClientLabels(client_id_2, "owner1", ["foo"])
d.AddClientLabels(client_id_1, "owner2", ["bar"])
d.AddClientLabels(client_id_2, "owner2", ["bar"])
all_labels = sorted(d.ReadAllClientLabels(), key=lambda l: l.name)
self.assertEqual(all_labels, [
rdf_objects.ClientLabel(name="bar", owner="owner2"),
rdf_objects.ClientLabel(name="foo", owner="owner1")
])
def testReadClientStartupInfo(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
d.WriteClientStartupInfo(client_id, rdf_client.StartupInfo(boot_time=1337))
d.WriteClientStartupInfo(client_id, rdf_client.StartupInfo(boot_time=2000))
last_is = d.ReadClientStartupInfo(client_id)
self.assertIsInstance(last_is, rdf_client.StartupInfo)
self.assertEqual(last_is.boot_time, 2000)
self.assertIsInstance(last_is.timestamp, rdfvalue.RDFDatetime)
md = self.db.ReadClientMetadata(client_id)
self.assertEqual(md.startup_info_timestamp, last_is.timestamp)
def testReadClientStartupInfoNone(self):
client_id = db_test_utils.InitializeClient(self.db)
self.assertIsNone(self.db.ReadClientStartupInfo(client_id))
def testReadClientStartupInfoHistory(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
d.WriteClientStartupInfo(client_id, rdf_client.StartupInfo(boot_time=1))
d.WriteClientStartupInfo(client_id, rdf_client.StartupInfo(boot_time=2))
d.WriteClientStartupInfo(client_id, rdf_client.StartupInfo(boot_time=3))
hist = d.ReadClientStartupInfoHistory(client_id)
self.assertLen(hist, 3)
self.assertEqual([si.boot_time for si in hist], [3, 2, 1])
self.assertIsInstance(hist[0].timestamp, rdfvalue.RDFDatetime)
self.assertGreater(hist[0].timestamp, hist[1].timestamp)
self.assertGreater(hist[1].timestamp, hist[2].timestamp)
md = self.db.ReadClientMetadata(client_id)
self.assertEqual(md.startup_info_timestamp, hist[0].timestamp)
def testReadClientStartupInfoHistoryEmpty(self):
client_id = db_test_utils.InitializeClient(self.db)
self.assertEqual(self.db.ReadClientStartupInfoHistory(client_id), [])
def _SetUpReadClientStartupInfoHistoryTest(self):
d = self.db
self.client_id = db_test_utils.InitializeClient(self.db)
timestamps = [rdfvalue.RDFDatetime.Now()]
si = rdf_client.StartupInfo(boot_time=1)
d.WriteClientStartupInfo(self.client_id, si)
timestamps.append(d.ReadClientStartupInfo(self.client_id).timestamp)
timestamps.append(rdfvalue.RDFDatetime.Now())
si = rdf_client.StartupInfo(boot_time=2)
d.WriteClientStartupInfo(self.client_id, si)
timestamps.append(d.ReadClientStartupInfo(self.client_id).timestamp)
timestamps.append(rdfvalue.RDFDatetime.Now())
return timestamps
def testReadClientStartupInfoHistoryWithEmptyTimerange(self):
d = self.db
self._SetUpReadClientStartupInfoHistoryTest()
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(None, None))
self.assertLen(hist, 2)
self.assertEqual(hist[0].boot_time, 2)
self.assertEqual(hist[1].boot_time, 1)
def testReadClientStartupInfoHistoryWithTimerangeWithBothFromTo(self):
d = self.db
ts = self._SetUpReadClientStartupInfoHistoryTest()
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[0], ts[2]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].boot_time, 1)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[2], ts[4]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].boot_time, 2)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[0], ts[4]))
self.assertLen(hist, 2)
self.assertEqual(hist[0].boot_time, 2)
self.assertEqual(hist[1].boot_time, 1)
def testReadClientStartupInfoHistoryWithTimerangeWithFromOnly(self):
d = self.db
ts = self._SetUpReadClientStartupInfoHistoryTest()
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[0], None))
self.assertLen(hist, 2)
self.assertEqual(hist[0].boot_time, 2)
self.assertEqual(hist[1].boot_time, 1)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[2], None))
self.assertLen(hist, 1)
self.assertEqual(hist[0].boot_time, 2)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[4], None))
self.assertEmpty(hist)
def testReadClientStartupInfoHistoryWithTimerangeWithToOnly(self):
d = self.db
ts = self._SetUpReadClientStartupInfoHistoryTest()
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(None, ts[0]))
self.assertEmpty(hist)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(None, ts[2]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].boot_time, 1)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(None, ts[4]))
self.assertLen(hist, 2)
self.assertEqual(hist[0].boot_time, 2)
self.assertEqual(hist[1].boot_time, 1)
def testReadClientStartupInfoHistoryWithTimerangeEdgeCases(self):
# Timerange should work as [from, to]. I.e. "from" is inclusive and "to"
# is inclusive.
d = self.db
ts = self._SetUpReadClientStartupInfoHistoryTest()
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[1], ts[1]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].boot_time, 1)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[1], ts[2]))
self.assertLen(hist, 1)
self.assertEqual(hist[0].boot_time, 1)
hist = d.ReadClientStartupInfoHistory(
self.client_id, timerange=(ts[1], ts[3]))
self.assertLen(hist, 2)
self.assertEqual(hist[0].boot_time, 2)
self.assertEqual(hist[1].boot_time, 1)
def testCrashHistory(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
ci = rdf_client.ClientCrash(timestamp=12345, crash_message="Crash #1")
d.WriteClientCrashInfo(client_id, ci)
ci.crash_message = "Crash #2"
d.WriteClientCrashInfo(client_id, ci)
ci.crash_message = "Crash #3"
d.WriteClientCrashInfo(client_id, ci)
last_is = d.ReadClientCrashInfo(client_id)
self.assertIsInstance(last_is, rdf_client.ClientCrash)
self.assertEqual(last_is.crash_message, "Crash #3")
self.assertIsInstance(last_is.timestamp, rdfvalue.RDFDatetime)
hist = d.ReadClientCrashInfoHistory(client_id)
self.assertLen(hist, 3)
self.assertEqual([ci.crash_message for ci in hist],
["Crash #3", "Crash #2", "Crash #1"])
self.assertIsInstance(hist[0].timestamp, rdfvalue.RDFDatetime)
self.assertGreater(hist[0].timestamp, hist[1].timestamp)
self.assertGreater(hist[1].timestamp, hist[2].timestamp)
md = self.db.ReadClientMetadata(client_id)
self.assertEqual(md.last_crash_timestamp, hist[0].timestamp)
self.assertIsNone(d.ReadClientCrashInfo("C.0000000000000000"))
self.assertEqual(d.ReadClientCrashInfoHistory("C.0000000000000000"), [])
def testEmptyCrashHistory(self):
client_id = "C.0000000050000001"
self.assertIsNone(self.db.ReadClientCrashInfo(client_id))
self.assertEqual(self.db.ReadClientCrashInfoHistory(client_id), [])
def testReadClientFullInfoPartialReads(self):
client_id = db_test_utils.InitializeClient(self.db)
self.assertIsNotNone(self.db.ReadClientFullInfo(client_id))
def testReadClientFullInfoReturnsCorrectResult(self):
d = self.db
client_id = db_test_utils.InitializeClient(self.db)
cl = rdf_objects.ClientSnapshot(
client_id=client_id,
knowledge_base=rdf_client.KnowledgeBase(fqdn="test1234.examples.com"),
kernel="12.3")
d.WriteClientSnapshot(cl)
d.WriteClientMetadata(client_id, certificate=CERT)
si = rdf_client.StartupInfo(boot_time=1)
d.WriteClientStartupInfo(client_id, si)
d.AddClientLabels(client_id, "test_owner", ["test_label"])
full_info = d.ReadClientFullInfo(client_id)
self.assertEqual(full_info.last_snapshot, cl)
self.assertEqual(full_info.metadata.certificate, CERT)
self.assertEqual(full_info.last_startup_info, si)
self.assertEqual(
full_info.labels,
[rdf_objects.ClientLabel(owner="test_owner", name="test_label")])
def _SetupFullInfoClients(self):
for i in range(10):
client_id = db_test_utils.InitializeClient(self.db,
"C.000000005000000%d" % i)
cl = rdf_objects.ClientSnapshot(
client_id=client_id,
knowledge_base=rdf_client.KnowledgeBase(fqdn="test%d.examples.com" %
i),
kernel="12.3.%d" % i)
self.db.WriteClientSnapshot(cl)
self.db.WriteClientMetadata(client_id, certificate=CERT)
si = rdf_client.StartupInfo(boot_time=i)
self.db.WriteClientStartupInfo(client_id, si)
self.db.AddClientLabels(
client_id, "test_owner",
["test_label-a-%d" % i, "test_label-b-%d" % i])
def _VerifySnapshots(self, snapshots):
snapshots = sorted(snapshots, key=lambda s: s.client_id)
self.assertLen(snapshots, 10)
for i, s in enumerate(snapshots):
self.assertEqual(s.client_id, "C.000000005000000%d" % i)
self.assertEqual(s.knowledge_base.fqdn, "test%d.examples.com" % i)
def _VerifyFullInfos(self, c_infos):
c_infos = sorted(c_infos, key=lambda c: c.last_snapshot.client_id)
for i, full_info in enumerate(c_infos):
self.assertEqual(full_info.last_snapshot.client_id,
"C.000000005000000%d" % i)
self.assertEqual(full_info.metadata.certificate, CERT)
self.assertEqual(full_info.last_startup_info.boot_time, i)
self.assertEqual(
sorted(full_info.labels, key=lambda l: l.name), [
rdf_objects.ClientLabel(
owner="test_owner", name="test_label-a-%d" % i),
rdf_objects.ClientLabel(
owner="test_owner", name="test_label-b-%d" % i)
])
def testIterateAllClientsFullInfo(self):
self._SetupFullInfoClients()
self._VerifyFullInfos(self.db.IterateAllClientsFullInfo())
def testIterateAllClientsFullInfoSmallBatches(self):
self._SetupFullInfoClients()
self._VerifyFullInfos(self.db.IterateAllClientsFullInfo(batch_size=2))
def testIterateAllClientSnapshots(self):
self._SetupFullInfoClients()
snapshots = self.db.IterateAllClientSnapshots()
self._VerifySnapshots(snapshots)
def testIterateAllClientSnapshotsSmallBatches(self):
self._SetupFullInfoClients()
snapshots = self.db.IterateAllClientSnapshots(batch_size=2)
self._VerifySnapshots(snapshots)
def _SetupLastPingClients(self, now):
time_past = now - rdfvalue.Duration.From(1, rdfvalue.DAYS)
client_ids_to_ping = {}
for i in range(10):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WriteClientSnapshot(
rdf_objects.ClientSnapshot(client_id=client_id))
ping = (time_past if i % 2 == 0 else now)
self.db.WriteClientMetadata(client_id, last_ping=ping)
client_ids_to_ping[client_id] = ping
return client_ids_to_ping
def testMultiReadClientsFullInfoFiltersClientsByLastPingTime(self):
d = self.db
base_time = rdfvalue.RDFDatetime.Now()
cutoff_time = base_time - rdfvalue.Duration.From(1, rdfvalue.SECONDS)
client_ids_to_ping = self._SetupLastPingClients(base_time)
expected_client_ids = [
cid for cid, ping in iteritems(client_ids_to_ping) if ping == base_time
]
full_infos = d.MultiReadClientFullInfo(
list(iterkeys(client_ids_to_ping)), min_last_ping=cutoff_time)
self.assertCountEqual(expected_client_ids, full_infos)
def testMultiReadClientsFullInfoWithEmptyList(self):
d = self.db
self.assertEqual(d.MultiReadClientFullInfo([]), {})
def testMultiReadClientsFullInfoSkipsMissingClients(self):
d = self.db
present_client_id = "C.fc413187fefa1dcf"
# Typical initial FS enabled write
d.WriteClientMetadata(present_client_id, fleetspeak_enabled=True)
missing_client_id = "C.00413187fefa1dcf"
full_infos = d.MultiReadClientFullInfo(
[present_client_id, missing_client_id])
self.assertEqual(list(iterkeys(full_infos)), [present_client_id])
def testMultiReadClientsFullInfoNoSnapshot(self):
d = self.db
client_id = "C.fc413187fefa1dcf"
d.WriteClientMetadata(client_id, fleetspeak_enabled=True)
full_info = d.MultiReadClientFullInfo([client_id])[client_id]
expected_snapshot = rdf_objects.ClientSnapshot(client_id=client_id)
self.assertEqual(full_info.last_snapshot, expected_snapshot)
def testReadClientMetadataRaisesWhenClientIsMissing(self):
with self.assertRaises(db.UnknownClientError):
self.db.ReadClientMetadata("C.00413187fefa1dcf")
def testReadClientFullInfoRaisesWhenClientIsMissing(self):
with self.assertRaises(db.UnknownClientError):
self.db.ReadClientFullInfo("C.00413187fefa1dcf")
def _SetupClientStats(self):
db_test_utils.InitializeClient(self.db, "C.0000000000000001")
db_test_utils.InitializeClient(self.db, "C.0000000000000002")
offsets = [
rdfvalue.Duration.From(0, rdfvalue.SECONDS),
rdfvalue.Duration.From(1, rdfvalue.SECONDS),
db.CLIENT_STATS_RETENTION,
db.CLIENT_STATS_RETENTION + rdfvalue.Duration.From(1, rdfvalue.SECONDS),
]
now = rdfvalue.RDFDatetime.Now()
for offset_i, offset in enumerate(offsets):
with test_lib.FakeTime(now - offset):
for client_id in [1, 2]:
stats = rdf_client_stats.ClientStats(
RSS_size=offset_i,
VMS_size=client_id,
timestamp=rdfvalue.RDFDatetime.Now())
self.db.WriteClientStats("C.%016x" % client_id, stats)
return now
def testReadEmptyClientStats(self):
client_id_1 = db_test_utils.InitializeClient(self.db)
client_id_2 = db_test_utils.InitializeClient(self.db)
self.assertEmpty(self.db.ReadClientStats(client_id_1))
self.assertEmpty(self.db.ReadClientStats(client_id_2))
self.db.WriteClientStats(client_id_2, rdf_client_stats.ClientStats())
self.assertEmpty(self.db.ReadClientStats(client_id_1))
self.assertNotEmpty(self.db.ReadClientStats(client_id_2))
def testReadClientStats(self):
client_id_1 = db_test_utils.InitializeClient(self.db)
client_id_2 = db_test_utils.InitializeClient(self.db)
self.db.WriteClientStats(
client_id_1,
rdf_client_stats.ClientStats(
RSS_size=1,
VMS_size=5,
timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5)))
self.db.WriteClientStats(
client_id_1, rdf_client_stats.ClientStats(RSS_size=2, VMS_size=6))
self.db.WriteClientStats(
client_id_2, rdf_client_stats.ClientStats(RSS_size=3, VMS_size=7))
self.db.WriteClientStats(
client_id_2, rdf_client_stats.ClientStats(RSS_size=4, VMS_size=8))
stats = self.db.ReadClientStats(
client_id=client_id_1,
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
self.assertEqual(stats[0].RSS_size, 1)
self.assertEqual(stats[0].VMS_size, 5)
self.assertEqual(stats[0].timestamp,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(5))
self.assertEqual(stats[1].RSS_size, 2)
self.assertEqual(stats[1].VMS_size, 6)
stats = self.db.ReadClientStats(
client_id=client_id_2,
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
self.assertEqual(stats[0].RSS_size, 3)
self.assertEqual(stats[0].VMS_size, 7)
self.assertEqual(stats[1].RSS_size, 4)
self.assertEqual(stats[1].VMS_size, 8)
def testReadClientStatsReturnsOrderedList(self):
client_id = db_test_utils.InitializeClient(self.db)
sorted_stats = [rdf_client_stats.ClientStats(RSS_size=i) for i in range(10)]
for stats in sorted_stats:
self.db.WriteClientStats(client_id, stats)
self.assertEqual(self.db.ReadClientStats(client_id=client_id), sorted_stats)
def testReadClientStatsAfterRetention(self):
now = self._SetupClientStats()
with test_lib.FakeTime(now):
stats = self.db.ReadClientStats("C.0000000000000001")
self.assertCountEqual([0, 1, 2], [st.RSS_size for st in stats])
stats = self.db.ReadClientStats("C.0000000000000002")
self.assertCountEqual([0, 1, 2], [st.RSS_size for st in stats])
def testWriteInvalidClientStats(self):
with self.assertRaises(ValueError):
self.db.WriteClientStats("C.000000000000000xx",
rdf_client_stats.ClientStats())
with self.assertRaises(TypeError):
self.db.WriteClientStats("C.0000000000000001", None)
with self.assertRaises(TypeError):
self.db.WriteClientStats("C.0000000000000001", {"RSS_size": 0})
def testWriteClientStatsForNonExistingClient(self):
with self.assertRaises(db.UnknownClientError) as context:
self.db.WriteClientStats("C.0000000000000005",
rdf_client_stats.ClientStats())
self.assertEqual(context.exception.client_id, "C.0000000000000005")
def testDeleteOldClientStats(self):
now = self._SetupClientStats()
with test_lib.FakeTime(now):
deleted = list(self.db.DeleteOldClientStats(yield_after_count=100))
self.assertEqual([2], deleted)
stats = self.db.ReadClientStats(
client_id="C.0000000000000001",
# MySQL TIMESTAMP's valid range starts from 1970-01-01 00:00:01,
# hence we have to specify the minimal min_timestamp as
        # 1 second from epoch.
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
self.assertCountEqual([0, 1, 2], [st.RSS_size for st in stats])
stats = self.db.ReadClientStats(
client_id="C.0000000000000002",
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
self.assertCountEqual([0, 1, 2], [st.RSS_size for st in stats])
def _TestDeleteOldClientStatsYields(self, total, yield_after_count,
yields_expected):
db_test_utils.InitializeClient(self.db, "C.0000000000000001")
now = rdfvalue.RDFDatetime.Now()
for i in range(1, total + 1):
with test_lib.FakeTime(now - db.CLIENT_STATS_RETENTION -
rdfvalue.Duration.From(i, rdfvalue.SECONDS)):
self.db.WriteClientStats("C.0000000000000001",
rdf_client_stats.ClientStats())
yields = []
with test_lib.FakeTime(now):
for deleted in self.db.DeleteOldClientStats(
yield_after_count=yield_after_count):
yields.append(deleted)
self.assertEqual(yields, yields_expected)
stats = self.db.ReadClientStats(
client_id="C.0000000000000001",
# MySQL TIMESTAMP's valid range starts from 1970-01-01 00:00:01,
# hence we have to specify the minimal min_timestamp as
        # 1 second from epoch.
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
self.assertEmpty(stats)
def testDeleteOldClientStatsDoesNotYieldIfEmpty(self):
self._TestDeleteOldClientStatsYields(
total=0, yield_after_count=10, yields_expected=[])
def testDeleteOldClientStatsYieldsExactMatch(self):
self._TestDeleteOldClientStatsYields(
total=10, yield_after_count=5, yields_expected=[5, 5])
def testDeleteOldClientStatsYieldsAtLeastOnce(self):
self._TestDeleteOldClientStatsYields(
total=10, yield_after_count=20, yields_expected=[10])
def testDeleteOldClientStatsYieldsUnexactMatch(self):
self._TestDeleteOldClientStatsYields(
total=10, yield_after_count=4, yields_expected=[4, 4, 2])
def _WriteTestClientsWithData(self,
client_indices,
last_ping=None,
client_name=None,
client_version=None,
os=None,
os_release=None,
os_version=None,
labels_dict=None):
for index in client_indices:
client_id = "C.1%015x" % index
self.db.WriteClientMetadata(
client_id, last_ping=last_ping, fleetspeak_enabled=False)
self.db.WriteClientSnapshot(
rdf_objects.ClientSnapshot(
client_id=client_id,
startup_info=rdf_client.StartupInfo(
client_info=rdf_client.ClientInformation(
client_name=client_name, client_version=client_version)),
knowledge_base=rdf_client.KnowledgeBase(os=os),
os_release=os_release,
os_version=os_version))
for owner, labels in iteritems(labels_dict):
self.db.AddClientLabels(client_id, owner=owner, labels=labels)
def _WriteTestDataForFleetStatsTesting(self):
self._WriteTestClientsWithData(
range(0, 5),
last_ping=_DaysSinceEpoch(32),
client_name="GRR",
client_version=1111,
os="Linux",
os_release="Ubuntu",
os_version="16.04",
labels_dict={
"GRR": ["grr-foo", "grr-bar"],
"tester": ["tester-foo", "tester-bar"]
})
self._WriteTestClientsWithData(
range(5, 7),
last_ping=None,
client_name="GRR",
client_version=1111,
os="Linux",
os_release="Ubuntu",
os_version="16.04",
labels_dict={"GRR": ["grr-foo", "grr-bar"]})
self._WriteTestClientsWithData(
range(7, 10),
last_ping=_DaysSinceEpoch(38),
client_name="GRR",
client_version=2222,
os="Linux",
os_release="Ubuntu",
os_version="18.04",
labels_dict={"GRR": ["grr-foo", "grr-bar"]})
self._WriteTestClientsWithData(
range(10, 13),
last_ping=_DaysSinceEpoch(43),
client_name="GRR",
client_version=1111,
os="Darwin",
os_release="OSX",
os_version="10.12.2",
labels_dict={"GRR": ["grr-foo", "grr-bar", "grr-baz"]})
self._WriteTestClientsWithData(
range(13, 14),
last_ping=_DaysSinceEpoch(15),
client_name="GRR",
client_version=1111,
os="Darwin",
os_release="OSX",
os_version="10.12.2",
labels_dict={}) # Client has no labels.
self._WriteTestClientsWithData(
range(14, 15),
last_ping=_DaysSinceEpoch(15),
client_name="GRR",
client_version=1111,
os="Darwin",
os_release="OSX",
os_version="10.12.2",
labels_dict={"tester": ["tester-foo"]}) # Client has no GRR labels.
# Client with missing data.
self._WriteTestClientsWithData(
range(15, 16),
last_ping=_DaysSinceEpoch(15),
labels_dict={"GRR": ["grr-foo"]})
self._WriteTestClientsWithData(
range(16, 17),
last_ping=_DaysSinceEpoch(1), # Ancient ping timestamp.
client_name="GRR",
client_version=1111,
os="Linux",
os_release="Ubuntu",
os_version="16.04",
labels_dict={"GRR": ["grr-foo", "grr-bar"]})
def testCountClientVersionStringsByLabel(self):
self._WriteTestDataForFleetStatsTesting()
with test_lib.FakeTime(_DaysSinceEpoch(44)):
fleet_stats = self.db.CountClientVersionStringsByLabel({1, 2, 8, 30})
for client_label in fleet_stats.GetAllLabels():
self.assertIsInstance(client_label, Text)
expected_label_counts = {
(2, "grr-foo", "GRR 1111"): 3,
(2, "grr-bar", "GRR 1111"): 3,
(2, "grr-baz", "GRR 1111"): 3,
(8, "grr-foo", "GRR 1111"): 3,
(8, "grr-bar", "GRR 1111"): 3,
(8, "grr-baz", "GRR 1111"): 3,
(8, "grr-foo", "GRR 2222"): 3,
(8, "grr-bar", "GRR 2222"): 3,
(30, "grr-foo", "GRR 1111"): 8,
(30, "grr-bar", "GRR 1111"): 8,
(30, "grr-baz", "GRR 1111"): 3,
(30, "grr-foo", "GRR 2222"): 3,
(30, "grr-bar", "GRR 2222"): 3,
(30, "grr-foo", " Unknown-GRR-version"): 1,
}
expected_total_counts = {
(2, "GRR 1111"): 3,
(8, "GRR 1111"): 3,
(8, "GRR 2222"): 3,
(30, "GRR 1111"): 10,
(30, "GRR 2222"): 3,
(30, " Unknown-GRR-version"): 1,
}
self.assertDictEqual(fleet_stats.GetFlattenedLabelCounts(),
expected_label_counts)
self.assertDictEqual(fleet_stats.GetFlattenedTotalCounts(),
expected_total_counts)
def testCountClientPlatformsByLabel(self):
self._WriteTestDataForFleetStatsTesting()
with test_lib.FakeTime(_DaysSinceEpoch(44)):
fleet_stats = self.db.CountClientPlatformsByLabel({1, 2, 8, 30})
for client_label in fleet_stats.GetAllLabels():
self.assertIsInstance(client_label, Text)
expected_label_counts = {
(2, "grr-foo", "Darwin"): 3,
(2, "grr-bar", "Darwin"): 3,
(2, "grr-baz", "Darwin"): 3,
(8, "grr-foo", "Darwin"): 3,
(8, "grr-bar", "Darwin"): 3,
(8, "grr-baz", "Darwin"): 3,
(8, "grr-foo", "Linux"): 3,
(8, "grr-bar", "Linux"): 3,
(30, "grr-foo", "Darwin"): 3,
(30, "grr-bar", "Darwin"): 3,
(30, "grr-baz", "Darwin"): 3,
(30, "grr-foo", "Linux"): 8,
(30, "grr-bar", "Linux"): 8,
(30, "grr-foo", ""): 1,
}
expected_total_counts = {
(2, "Darwin"): 3,
(8, "Darwin"): 3,
(8, "Linux"): 3,
(30, "Darwin"): 5,
(30, "Linux"): 8,
(30, ""): 1,
}
self.assertDictEqual(fleet_stats.GetFlattenedLabelCounts(),
expected_label_counts)
self.assertDictEqual(fleet_stats.GetFlattenedTotalCounts(),
expected_total_counts)
def testCountClientPlatformReleasesByLabel(self):
self._WriteTestDataForFleetStatsTesting()
with test_lib.FakeTime(_DaysSinceEpoch(44)):
fleet_stats = self.db.CountClientPlatformReleasesByLabel({1, 2, 8, 30})
for client_label in fleet_stats.GetAllLabels():
self.assertIsInstance(client_label, Text)
expected_label_counts = {
(2, "grr-foo", "Darwin-OSX-10.12.2"): 3,
(2, "grr-bar", "Darwin-OSX-10.12.2"): 3,
(2, "grr-baz", "Darwin-OSX-10.12.2"): 3,
(8, "grr-foo", "Darwin-OSX-10.12.2"): 3,
(8, "grr-bar", "Darwin-OSX-10.12.2"): 3,
(8, "grr-baz", "Darwin-OSX-10.12.2"): 3,
(8, "grr-foo", "Linux-Ubuntu-18.04"): 3,
(8, "grr-bar", "Linux-Ubuntu-18.04"): 3,
(30, "grr-foo", "Darwin-OSX-10.12.2"): 3,
(30, "grr-bar", "Darwin-OSX-10.12.2"): 3,
(30, "grr-baz", "Darwin-OSX-10.12.2"): 3,
(30, "grr-foo", "Linux-Ubuntu-18.04"): 3,
(30, "grr-bar", "Linux-Ubuntu-18.04"): 3,
(30, "grr-foo", "Linux-Ubuntu-16.04"): 5,
(30, "grr-bar", "Linux-Ubuntu-16.04"): 5,
(30, "grr-foo", "--"): 1,
}
expected_total_counts = {
(2, "Darwin-OSX-10.12.2"): 3,
(8, "Darwin-OSX-10.12.2"): 3,
(8, "Linux-Ubuntu-18.04"): 3,
(30, "Darwin-OSX-10.12.2"): 5,
(30, "Linux-Ubuntu-16.04"): 5,
(30, "Linux-Ubuntu-18.04"): 3,
(30, "--"): 1,
}
self.assertDictEqual(fleet_stats.GetFlattenedLabelCounts(),
expected_label_counts)
self.assertDictEqual(fleet_stats.GetFlattenedTotalCounts(),
expected_total_counts)
@mock.patch.object(db, "_MAX_GRR_VERSION_LENGTH", 10)
def testWriteClientSnapshotLongGRRVersion(self):
snapshot = rdf_objects.ClientSnapshot(client_id="C.0000000000000001")
snapshot.startup_info.client_info.client_description = "🚀" * 12
snapshot.startup_info.client_info.client_version = 1234
with self.assertRaises(db.StringTooLongError):
self.db.WriteClientSnapshot(snapshot)
@mock.patch.object(db, "_MAX_CLIENT_PLATFORM_LENGTH", 10)
def testWriteClientSnapshotLongPlatform(self):
snapshot = rdf_objects.ClientSnapshot(client_id="C.0000000000000001")
snapshot.knowledge_base.os = "🚀" * 12
with self.assertRaises(db.StringTooLongError):
self.db.WriteClientSnapshot(snapshot)
@mock.patch.object(db, "_MAX_CLIENT_PLATFORM_RELEASE_LENGTH", 10)
def testWriteClientSnapshotLongPlatformRelease(self):
snapshot = rdf_objects.ClientSnapshot(client_id="C.0000000000000001")
snapshot.knowledge_base.os = "🚀" * 12
with self.assertRaises(db.StringTooLongError):
self.db.WriteClientSnapshot(snapshot)
def _AddClientKeyedData(self, client_id):
# Client labels.
self.db.AddClientLabels(client_id, "testowner", ["label"])
# Client snapshot including client startup info.
snapshot = rdf_objects.ClientSnapshot(client_id=client_id)
snapshot.startup_info.client_info.client_version = 42
self.db.WriteClientSnapshot(snapshot)
# Crash information
self.db.WriteClientCrashInfo(
client_id,
rdf_client.ClientCrash(timestamp=12345, crash_message="Crash #1"))
# Index keywords.
self.db.AddClientKeywords(client_id, ["machine.test.example1.com"])
# Client stats.
self.db.WriteClientStats(
client_id, rdf_client_stats.ClientStats(RSS_size=10, VMS_size=123))
# A flow.
flow_id = flow.RandomFlowId()
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now()))
# A flow request.
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
])
# A flow response.
self.db.WriteFlowResponses([
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=1)
])
# A flow processing request.
self.db.WriteFlowProcessingRequests(
[rdf_flows.FlowProcessingRequest(client_id=client_id, flow_id=flow_id)])
# A client action request.
self.db.WriteClientActionRequests([
rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
])
return flow_id
def _CheckClientKeyedDataWasDeleted(self, client_id, flow_id):
# Client labels.
self.assertEmpty(self.db.ReadClientLabels(client_id))
# Client snapshot including client startup info.
self.assertIsNone(self.db.ReadClientSnapshot(client_id))
self.assertIsNone(self.db.ReadClientStartupInfo(client_id))
# Crash information
self.assertIsNone(self.db.ReadClientCrashInfo(client_id))
# Index keywords.
res = self.db.ListClientsForKeywords(["machine.test.example1.com"])
self.assertEqual(res, {"machine.test.example1.com": []})
# Client stats.
self.assertEmpty(self.db.ReadClientStats(client_id))
# A flow.
with self.assertRaises(db.UnknownFlowError):
self.db.ReadFlowObject(client_id, flow_id)
# A client action request.
self.assertEmpty(self.db.ReadAllClientActionRequests(client_id))
def testDeleteClient(self):
client_id = db_test_utils.InitializeClient(self.db)
# Add some data that will be stored with the client id as foreign key. None
# of this additional data should stop the client from being deleted.
flow_id = self._AddClientKeyedData(client_id)
self.db.DeleteClient(client_id=client_id)
with self.assertRaises(db.UnknownClientError):
self.db.ReadClientMetadata(client_id)
self._CheckClientKeyedDataWasDeleted(client_id, flow_id)
def testDeleteNonExistingClient(self):
client_id = "C.0000000000000000"
with self.assertRaises(db.UnknownClientError):
self.db.DeleteClient(client_id=client_id)
def testDeleteClientNoAdditionalData(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.DeleteClient(client_id=client_id)
with self.assertRaises(db.UnknownClientError):
self.db.ReadClientMetadata(client_id)
# This file is a test library and thus does not require a __main__ block.
| apache-2.0 | -512,624,584,033,677,000 | 38.0011 | 80 | 0.688537 | false |
botify-labs/simpleflow | swf/querysets/history.py | 1 | 2268 | from swf.models import History
from swf.querysets.base import BaseQuerySet
class HistoryQuerySet(BaseQuerySet):
"""WorkflowExecution history queryset"""
def __init__(self, domain, *args, **kwargs):
super(HistoryQuerySet, self).__init__(*args, **kwargs)
self.domain = domain
def get(self, run_id, workflow_id, max_results=None, page_size=100, reverse=False):
"""Retrieves a WorkflowExecution history
:param run_id: unique identifier of the workflow execution
:type run_id: string
:param workflow_id: The user defined identifier associated with the workflow execution
:type workflow_id: string
        :param max_results: Max output history size. The retrieved history will be
                            truncated if its size exceeds max_results.
        :type max_results: int
        :param page_size: SWF API response page size: controls how many history events
                          are returned by each request. Keep in mind that further pages
                          are requested until the history contains max_results events.
:type page_size: int
:param reverse: Should the history events be retrieved in reverse order.
:type reverse: bool
"""
max_results = max_results or page_size
if max_results < page_size:
page_size = max_results
response = self.connection.get_workflow_execution_history(
self.domain.name,
run_id,
workflow_id,
maximum_page_size=page_size,
reverse_order=reverse,
)
events = response["events"]
next_page = response.get("nextPageToken")
while next_page is not None and len(events) < max_results:
response = self.connection.get_workflow_execution_history(
self.domain.name,
run_id,
workflow_id,
maximum_page_size=page_size,
next_page_token=next_page,
reverse_order=reverse,
)
events.extend(response["events"])
next_page = response.get("nextPageToken")
return History.from_event_list(events)
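# Usage sketch (added for illustration, not part of the original module). A
# `Domain` model and a configured SWF connection are assumed to exist elsewhere
# in the package; the names below are only an example.
#
#   from swf.models import Domain
#
#   qs = HistoryQuerySet(Domain("my-domain"))
#   history = qs.get(run_id, workflow_id, max_results=500, page_size=100)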
| mit | -425,559,705,143,165,500 | 36.180328 | 95 | 0.592152 | false |
ic-hep/DIRAC | Core/Utilities/ClassAd/ClassAdLight.py | 1 | 8864 | ########################################################################
# $HeadURL$
########################################################################
""" ClassAd Class - a light purely Python representation of the
Condor ClassAd library.
"""
__RCSID__ = "$Id$"
class ClassAd:
def __init__( self, jdl ):
"""ClassAd constructor from a JDL string
"""
self.contents = {}
result = self.__analyse_jdl( jdl )
if result:
self.contents = result
def __analyse_jdl( self, jdl, index = 0 ):
"""Analyse one [] jdl enclosure
"""
jdl = jdl.strip()
# Strip all the blanks first
#temp = jdl.replace(' ','').replace('\n','')
temp = jdl
result = {}
if temp[0] != '[' or temp[-1] != ']':
print "Invalid JDL: it should start with [ and end with ]"
return result
# Parse the jdl string now
body = temp[1:-1]
index = 0
namemode = 1
valuemode = 0
while index < len( body ):
if namemode:
ind = body.find( "=", index )
if ind != -1:
name = body[index:ind]
index = ind + 1
valuemode = 1
namemode = 0
else:
break
elif valuemode:
ind1 = body.find( "[", index )
ind2 = body.find( ";", index )
if ind1 != -1 and ind1 < ind2:
value, newind = self.__find_subjdl( body, ind1 )
elif ind1 == -1 and ind2 == -1:
value = body[index:]
newind = len( body )
else:
if index == ind2:
return {}
else:
value = body[index:ind2]
newind = ind2 + 1
result[name.strip()] = value.strip().replace( '\n', '' )
index = newind
valuemode = 0
namemode = 1
return result
def __find_subjdl( self, body, index ):
""" Find a full [] enclosure starting from index
"""
result = ''
if body[index] != '[':
return ( result, 0 )
depth = 0
ind = index
while ( depth < 10 ):
ind1 = body.find( ']', ind + 1 )
ind2 = body.find( '[', ind + 1 )
if ind2 != -1 and ind2 < ind1:
depth += 1
ind = ind2
else:
if depth > 0:
depth -= 1
ind = ind1
else:
result = body[index:ind1 + 1]
if body[ind1 + 1] == ";":
return ( result, ind1 + 2 )
else:
return result, 0
return result, 0
def insertAttributeInt( self, name, attribute ):
"""Insert a named integer attribute
"""
self.contents[name] = str( attribute )
def insertAttributeBool( self, name, attribute ):
"""Insert a named boolean attribute
"""
if attribute:
self.contents[name] = 'true'
else:
self.contents[name] = 'false'
def insertAttributeString( self, name, attribute ):
"""Insert a named string attribute
"""
self.contents[name] = '"' + str( attribute ) + '"'
def insertAttributeVectorString( self, name, attributelist ):
"""Insert a named string list attribute
"""
tmp = map ( lambda x : '"' + x + '"', attributelist )
tmpstr = ','.join( tmp )
self.contents[name] = '{' + tmpstr + '}'
def insertAttributeVectorInt( self, name, attributelist ):
"""Insert a named string list attribute
"""
tmp = map ( lambda x : str( x ), attributelist )
tmpstr = ','.join( tmp )
self.contents[name] = '{' + tmpstr + '}'
def insertAttributeVectorStringList( self, name, attributelist ):
"""Insert a named list of string lists
"""
listOfLists = []
for stringList in attributelist:
#tmp = map ( lambda x : '"' + x + '"', stringList )
tmpstr = ','.join( stringList )
listOfLists.append('{' + tmpstr + '}')
self.contents[name] = '{' + ','.join(listOfLists) + '}'
def lookupAttribute( self, name ):
"""Check the presence of the given attribute
"""
return self.contents.has_key( name )
def set_expression( self, name, attribute ):
"""Insert a named expression attribute
"""
self.contents[name] = str( attribute )
def get_expression( self, name ):
"""Get expression corresponding to a named attribute
"""
if self.contents.has_key( name ):
if isinstance( self.contents[name], ( int, long ) ):
return str( self.contents[name] )
else :
return self.contents[name]
else:
return ""
def isAttributeList( self, name ):
""" Check if the given attribute is of the List type
"""
attribute = self.get_expression( name ).strip()
return attribute.startswith( '{' )
def getListFromExpression( self, name ):
""" Get a list of strings from a given expression
"""
tempString = self.get_expression( name ).strip()
listMode = False
if tempString.startswith('{'):
tempString = tempString[1:-1]
listMode = True
tempString = tempString.replace( " ", "" ).replace( '\n','' )
if tempString.find('{') < 0:
if not listMode:
tempString = tempString.replace( "\"", "" )
return tempString.split( ',' )
resultList = []
while tempString:
if tempString.find( '{' ) == 0 :
end = tempString.find( '}' )
resultList.append(tempString[:end+1])
tempString = tempString[end+1:]
if tempString.startswith(','):
tempString = tempString[1:]
elif tempString.find( '"' ) == 0 :
end = tempString[1:].find( '"' )
resultList.append( tempString[1:end+1] )
tempString = tempString[end+2:]
if tempString.startswith(','):
tempString = tempString[1:]
else:
end = tempString.find( ',' )
if end < 0:
resultList.append( tempString.replace( "\"", "" ).replace( " ", "" ) )
break
else:
resultList.append( tempString[:end].replace( "\"", "" ).replace( " ", "" ) )
tempString = tempString[end+1:]
return resultList
def getDictionaryFromSubJDL( self, name ):
""" Get a dictionary of the JDL attributes from a subsection
"""
tempList = self.get_expression( name )[1:-1]
resDict = {}
for item in tempList.split( ';' ):
if len( item.split( '=' ) ) == 2:
resDict[item.split( '=' )[0].strip()] = item.split( '=' )[1].strip().replace( '"', '' )
else:
return {}
return resDict
def deleteAttribute( self, name ):
"""Delete a named attribute
"""
if self.contents.has_key( name ):
del self.contents[name]
return 1
else:
return 0
def isOK( self ):
"""Check the JDL validity - to be defined
"""
if self.contents:
return 1
else:
return 0
def asJDL( self ):
"""Convert the JDL description into a string
"""
result = ''
for name, value in self.contents.items():
if value[0:1] == "{":
result = result + 4 * ' ' + name + " = \n"
result = result + 8 * ' ' + '{\n'
strings = value[1:-1].split( ',' )
for st in strings:
result = result + 12 * ' ' + st.strip() + ',\n'
result = result[:-2] + '\n' + 8 * ' ' + '};\n'
elif value[0:1] == "[":
tempad = ClassAd( value )
tempjdl = tempad.asJDL() + ';'
lines = tempjdl.split( '\n' )
result = result + 4 * ' ' + name + " = \n"
for line in lines:
result = result + 8 * ' ' + line + '\n'
else:
result = result + 4 * ' ' + name + ' = ' + str( value ) + ';\n'
return "[ \n" + result[:-1] + "\n]"
def getAttributeString( self, name ):
""" Get String type attribute value
"""
value = ''
if self.lookupAttribute( name ):
value = self.get_expression( name ).replace( '"', '' )
return value
def getAttributeInt( self, name ):
""" Get Integer type attribute value
"""
value = 0
if self.lookupAttribute( name ):
try:
value = int( self.get_expression( name ).replace( '"', '' ) )
except Exception:
value = 0
return value
def getAttributeBool( self, name ):
""" Get Boolean type attribute value
"""
if self.lookupAttribute( name ):
value = self.get_expression( name ).replace( '"', '' )
else:
return False
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
return False
def getAttributeFloat( self, name ):
""" Get Float type attribute value
"""
value = 0.0
if self.lookupAttribute( name ):
try:
value = float( self.get_expression( name ).replace( '"', '' ) )
except Exception:
value = 0.0
return value
def getAttributes( self ):
""" Get the list of all the attribute names
:return: list of names as strings
"""
    return self.contents.keys()
| gpl-3.0 | -481,757,788,879,941,700 | 26.027439 | 95 | 0.525496 | false |
google-research/robel | robel/scripts/check_mujoco_deps.py | 1 | 1290 | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks if the given MuJoCo XML file has valid dependencies.
Example usage:
python -m robel.scripts.check_mujoco_deps path/to/mujoco.xml
"""
import argparse
import logging
import os
from robel.utils.resources import AssetBundle
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path', nargs=1, help='The MuJoCo XML to parse.')
args = parser.parse_args()
model_path = args.path[0]
if not os.path.exists(model_path):
raise ValueError('Path does not exist: ' + model_path)
logging.basicConfig(level=logging.INFO)
with AssetBundle(dry_run=True, verbose=True) as bundle:
bundle.add_mujoco(model_path)
if __name__ == '__main__':
main()
| apache-2.0 | -2,726,141,442,768,360,400 | 29 | 74 | 0.723256 | false |
shgo/baixa_camara | obter_inteiro_teor.py | 1 | 7140 | #!/usr/bin/python3
#-*- encoding: utf-8 -*-
#Copyright (C) 2016 Saullo Oliveira
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Iterate over the previously stored proposições (bill proposals), fetch the full
text (inteiro teor) of each one, and process the text.
"""
__author__ = "Saullo Oliveira"
__copyright__ = "Copyright 2016"
__credits__ = ["Saullo Oliveira"]
__license__ = "GPLv3"
__version__ = "0.1"
__maintainer__ = "Saullo Oliveira"
__email__ = "[email protected]"
__status__ = "Development"
from io import StringIO
import os.path
import argparse
import pickle as pkl
import urllib.request
import urllib.parse
import logging
import re
import magic
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from docx import Document
def get_inteiro_teor(prop):
"""
    Fetch the content of the full text (inteiro teor) of prop and tokenize it.
Args:
prop (Proposicao)
"""
print('{}\tObtendo inteiro teor da proposição {}'.format(
prop.ano, prop.id_))
print(prop.link_inteiro_teor)
    # if the full text has already been collected, do nothing
if hasattr(prop, 'inteiro_teor'):
return prop
    # if there is no valid link to the full text
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if not regex.match(prop.link_inteiro_teor):
logging.warning('MISSING - %s não tem link para inteiro teor.\n',
prop.id_)
return prop
arquivo = urllib.request.urlretrieve(prop.link_inteiro_teor)
with open(arquivo[0], 'rb') as arq:
cabecalho = magic.from_file(arquivo[0])
texto = ''
try:
if cabecalho.startswith(b'PDF'):
parser = PDFParser(arq)
doc = PDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
doc.initialize()
rsrcmgr = PDFResourceManager()
output = StringIO()
converter = TextConverter(rsrcmgr, output, laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, converter)
print('\t\tprocessando páginas')
for page in doc.get_pages():
interpreter.process_page(page)
texto = output.getvalue()
elif cabecalho.startswith(b'Com'):
document = Document(arq)
print('\t\tprocessando paragrafos')
                for paragraph in document.paragraphs:  # Document objects are not directly iterable
texto += paragraph.text
else:
raise Exception('Formato desconhecido')
print('\t\ttokenizando')
prop.inteiro_teor = re.split(r'\W+', texto)
except:
logging.warning('CORRUPT: %s arquivo corrupto! Oferecer dinheiro!',
prop.id_)
logging.warning(prop.link_inteiro_teor)
nome = 'inteiro_teor/inteiro_teor_{}.doc'.format(prop.id_)
with open(nome, 'wb') as salvar:
salvar.write(arq.read())
logging.warning('arquivo salvo em %s\n', nome)
return prop
def main():
    # parse the command-line arguments
parser = argparse.ArgumentParser(
description="""Baixa e processa os arquivos com inteiro teor para
cada proposição de lei no arquivo correspondente aos
parâmtros. As proposições que não forem processadas por
qualquer motivo (arquivo corrupto (hahaha), ou sem link)
estarão listadas no log, e os arquivos se corruptos,
serão baixados para a pasta inteiro_teor.""",
epilog="""Ex. de uso: para baixar o inteiro teor do arquivo
down_files/prop_props_PL_2016_apens_True.pkl:
./obter_inteiro_teor.py -anos 2016 -tipos PL -apensadas""")
parser.add_argument('-anos', type=int, action='append', nargs='*',
help="""anos das proposições já baixadas sem inteiro
teor.""")
parser.add_argument('-tipos', type=str, nargs='*',
help="""tipos de proposição já baixadas sem inteiro teor.""")
parser.add_argument('-apensadas', action='store_true',
help="""indica se o arquivo das proposições já baixadas
contém apensadas ou não. Útil para encontrar o
arquivo correto.""")
args = vars(parser.parse_args())
licensa = ("baixa_camara Copyright (C) 2016 Saullo Oliveira\n"
"This program comes with ABSOLUTELY NO WARRANTY;\n"
"This is free software, and you are welcome to redistribute it\n"
"under certain conditions; See COPYING file for more"
"information.\n"
"Type ENTER to continue...")
print(licensa)
input()
apens = args['apensadas']
for tp in args['tipos']:
for ano in args['anos'][0]:
print('Tipo {} ano {}.'.format(tp, ano))
logging.basicConfig(filename="logs/warnings_{}_{}.log".format(tp,
ano),
level=logging.WARNING)
            if os.path.isfile('down_files/prop_props_{}_{}_apens_{}.pkl'\
.format(tp, ano, apens)):
with open('down_files/prop_props_{}_{}_apens_{}.pkl'\
.format(tp, ano, apens), 'rb')\
as arq_prop:
print('Processando {}-{}'.format(tp, ano))
props = pkl.load(arq_prop)
props = [get_inteiro_teor(prop) for prop in props]
with open('down_files/prop_props_{}_{}_apens_{}.pkl'\
.format(tp, ano, apens), 'wb')\
as arq_prop:
print('Salvando {}-{}'.format(tp, ano))
pkl.dump(props, arq_prop)
else:
print(("\tarquivo não encontrado. Você já rodou o script "
"obter_proposicoes.py?"))
if __name__ == '__main__':
main()
| gpl-3.0 | -8,356,535,699,134,562,000 | 41.795181 | 103 | 0.562922 | false |
StartTheShift/thunderdome | thunderdome/properties.py | 1 | 13707 | # Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import copy
from datetime import datetime
from decimal import Decimal as D
import re
import time
import warnings
from uuid import uuid1, uuid4
from uuid import UUID as _UUID
from thunderdome.exceptions import ValidationError
# Saving strategies for thunderdome. These are used to indicate when a property
# should be saved after the initial vertex/edge creation.
#
# SAVE_ONCE - Only save this value once. If it changes throw an exception.
# SAVE_ONCHANGE - Only save this value if it has changed.
# SAVE_ALWAYS - Save this value every time the corresponding model is saved.
SAVE_ONCE = 1
SAVE_ONCHANGE = 2
SAVE_ALWAYS = 3
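# Illustrative sketch (not part of the original module): a save strategy is
# passed when a column is declared and later read back through
# Column.get_save_strategy(); the attribute names below are made up.
#
#   created_at = DateTime(default=datetime.now, save_strategy=SAVE_ONCE)
#   view_count = Integer(default=0, save_strategy=SAVE_ALWAYS)
#   created_at.has_save_strategy    # -> True
#   created_at.get_save_strategy()  # -> SAVE_ONCE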
class BaseValueManager(object):
"""
Value managers are used to manage values pulled from the database and
track state changes.
"""
def __init__(self, instance, column, value):
"""
Initialize the value manager.
:param instance: An object instance
:type instance: mixed
:param column: The column to manage
:type column: thunder.columns.Column
:param value: The initial value of the column
:type value: mixed
"""
self._create_private_fields()
self.instance = instance
self.column = column
self.previous_value = value
self.value = value
def _create_private_fields(self):
self._previous_value = None
@property
def previous_value(self):
return self._previous_value
@previous_value.setter
def previous_value(self, val):
self._previous_value = copy.copy(val)
@property
def deleted(self):
"""
Indicates whether or not this value has been deleted.
:rtype: boolean
"""
return self.value is None and self.previous_value is not None
@property
def changed(self):
"""
Indicates whether or not this value has changed.
:rtype: boolean
"""
return self.value != self.previous_value
def getval(self):
"""Return the current value."""
return self.value
def setval(self, val):
"""
Updates the current value.
:param val: The new value
:type val: mixed
"""
self.value = val
def delval(self):
"""Delete a given value"""
self.value = None
def get_property(self):
"""
Returns a value-managed property attributes
:rtype: property
"""
_get = lambda slf: self.getval()
_set = lambda slf, val: self.setval(val)
_del = lambda slf: self.delval()
if self.column.can_delete:
return property(_get, _set, _del)
else:
return property(_get, _set)
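# Illustrative sketch (not part of the original module): the value manager
# keeps a copy of the previous value so `changed` and `deleted` can be derived.
#
#   mgr = BaseValueManager(instance=None, column=None, value='old')
#   mgr.setval('new')
#   mgr.changed   # -> True ('new' != 'old')
#   mgr.delval()
#   mgr.deleted   # -> True (value is None, previous value was not)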
class Column(object):
"""Base class for column types"""
value_manager = BaseValueManager
instance_counter = 0
def __init__(self,
primary_key=False,
index=False,
db_field=None,
default=None,
required=False,
save_strategy=None):
"""
Initialize this column with the given information.
:param primary_key: Indicates whether or not this is primary key
:type primary_key: boolean
:param index: Indicates whether or not this field should be indexed
:type index: boolean
:param db_field: The fieldname this field will map to in the database
:type db_field: str
:param default: Value or callable with no args to set default value
:type default: mixed or callable
:param required: Whether or not this field is required
:type required: boolean
:param save_strategy: Strategy used when saving the value of the column
:type save_strategy: int
"""
self.primary_key = primary_key
self.index = index
self.db_field = db_field
self.default = default
self.required = required
self.save_strategy = save_strategy
#the column name in the model definition
self.column_name = None
self.value = None
#keep track of instantiation order
self.position = Column.instance_counter
Column.instance_counter += 1
def validate(self, value):
"""
Returns a cleaned and validated value. Raises a ValidationError
if there's a problem
"""
if value is None:
if self.has_default:
return self.get_default()
elif self.required:
raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))
return value
def to_python(self, value):
"""
Converts data from the database into python values
raises a ValidationError if the value can't be converted
"""
return value
def to_database(self, value):
"""
Converts python value into database value
"""
if value is None and self.has_default:
return self.get_default()
return value
@property
def has_default(self):
"""
Indicates whether or not this column has a default value.
:rtype: boolean
"""
return self.default is not None
@property
def has_save_strategy(self):
"""
Indicates whether or not this column has a save strategy.
:rtype: boolean
"""
return self.save_strategy is not None
@property
def can_delete(self):
return not self.primary_key
def get_save_strategy(self):
"""
Returns the save strategy attached to this column.
:rtype: int or None
"""
return self.save_strategy
def get_default(self):
"""
Returns the default value for this column if one is available.
:rtype: mixed or None
"""
if self.has_default:
if callable(self.default):
return self.default()
else:
return self.default
def set_column_name(self, name):
"""
        Sets the column name during document class construction. This value will
be ignored if db_field is set in __init__
:param name: The name of this column
:type name: str
"""
self.column_name = name
@property
def db_field_name(self):
"""Returns the name of the thunderdome name of this column"""
return self.db_field or self.column_name
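# Illustrative sketch (not part of the original module) of the conversion
# pipeline defined by Column: validate() cleans incoming values, to_database()
# prepares them for storage, and to_python() converts stored values back.
#
#   col = Integer(required=True, default=0)
#   col.validate('42')     # -> 42 (long), raises ValidationError if invalid
#   col.to_database(42)    # -> 42 (long), value handed to the database
#   col.to_python('42')    # -> 42 (long), value exposed on the model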
class String(Column):
def __init__(self, *args, **kwargs):
required = kwargs.get('required', False)
self.min_length = kwargs.pop('min_length', 1 if required else None)
self.max_length = kwargs.pop('max_length', None)
self.encoding = kwargs.pop('encoding', 'utf-8')
if 'default' in kwargs and isinstance(kwargs['default'], basestring):
kwargs['default'] = kwargs['default'].encode(self.encoding)
super(Text, self).__init__(*args, **kwargs)
def validate(self, value):
# Make sure that shit gets encoded properly
if isinstance(value, unicode):
value = value.encode(self.encoding)
value = super(Text, self).validate(value)
if value is None:
return None
if not isinstance(value, basestring) and value is not None:
raise ValidationError('{} is not a string'.format(type(value)))
if self.max_length:
if len(value) > self.max_length:
raise ValidationError('{} is longer than {} characters'.format(self.column_name, self.max_length))
if self.min_length:
if len(value) < self.min_length:
raise ValidationError('{} is shorter than {} characters'.format(self.column_name, self.min_length))
return value
Text = String
class Integer(Column):
def validate(self, value):
val = super(Integer, self).validate(value)
if val is None:
return
try:
return long(val)
except (TypeError, ValueError):
raise ValidationError("{} can't be converted to integral value".format(value))
def to_python(self, value):
if value is not None:
return long(value)
def to_database(self, value):
value = super(Integer, self).to_database(value)
if value is not None:
return long(value)
class DateTime(Column):
def __init__(self, strict=True, **kwargs):
"""
Initialize date-time column with the given settings.
        :param strict: If False, automatically coerce numeric and string timestamps into datetime objects
:type strict: boolean
"""
self.strict = strict
super(DateTime, self).__init__(**kwargs)
def to_python(self, value):
if isinstance(value, datetime):
return value
return datetime.fromtimestamp(float(value))
def to_database(self, value):
value = super(DateTime, self).to_database(value)
if value is None:
return
if not isinstance(value, datetime):
if not self.strict and isinstance(value, (basestring, int, float)):
value = datetime.fromtimestamp(float(value))
else:
raise ValidationError("'{}' is not a datetime object".format(value))
tmp = time.mktime(value.timetuple()) # gives us a float with .0
# microtime is a 6 digit int, so we bring it down to .xxx and add it to the float TS
tmp = tmp + float(value.microsecond) / 1000000
return tmp
class UUID(Column):
"""Universally Unique Identifier (UUID) type - UUID4 by default"""
re_uuid = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
def __init__(self, default=lambda: str(uuid4()), **kwargs):
super(UUID, self).__init__(default=default, **kwargs)
def validate(self, value):
val = super(UUID, self).validate(value)
if val is None:
return None # if required = False and not given
if not self.re_uuid.match(str(val)):
raise ValidationError("{} is not a valid uuid".format(value))
return val
def to_python(self, value):
val = super(UUID, self).to_python(value)
return str(val)
def to_database(self, value):
val = super(UUID, self).to_database(value)
if val is None:
return
return str(val)
class Boolean(Column):
def to_python(self, value):
return bool(value)
def to_database(self, value):
val = super(Boolean, self).to_database(value)
return bool(val)
class Double(Column):
def __init__(self, **kwargs):
self.db_type = 'double'
super(Double, self).__init__(**kwargs)
def validate(self, value):
val = super(Double, self).validate(value)
if val is None:
return None # required = False
try:
return float(value)
except (TypeError, ValueError):
raise ValidationError("{} is not a valid double".format(value))
def to_python(self, value):
if value is not None:
return float(value)
def to_database(self, value):
value = super(Double, self).to_database(value)
if value is not None:
return float(value)
class Float(Double):
"""Float class for backwards compatability / if you really want to"""
def __init__(self, **kwargs):
warnings.warn("Float type is deprecated. Please use Double.",
category=DeprecationWarning)
super(Float, self).__init__(**kwargs)
class Decimal(Column):
def to_python(self, value):
val = super(Decimal, self).to_python(value)
if val is not None:
return D(val)
def to_database(self, value):
val = super(Decimal, self).to_database(value)
if val is not None:
return str(val)
class Dictionary(Column):
def validate(self, value):
val = super(Dictionary, self).validate(value)
if val is None:
return None # required = False
if not isinstance(val, dict):
raise ValidationError('{} is not a valid dict'.format(val))
return val
class List(Column):
def validate(self, value):
val = super(List, self).validate(value)
if val is None:
return None # required = False
if not isinstance(val, (list, tuple)):
raise ValidationError('{} is not a valid list'.format(val))
return val
| mit | -7,597,682,304,716,384,000 | 28.540948 | 115 | 0.607208 | false |
tps12/freezing-shame | freezing/shame/tests/views/test_cart.py | 1 | 5789 | from django.test import TestCase
class CartTest(TestCase):
from django.test.client import Client
Client = staticmethod(Client)
from shame.models import Store
Store = staticmethod(Store)
from shame.models import Product
Product = staticmethod(Product)
from xml.etree import ElementTree
ElementTree = staticmethod(ElementTree)
def test_addtocart(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
self.assertLess(response.status_code, 400)
def test_addrequiressku(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().post(
'/cart',
{ 'notasku': product.sku },
HTTP_HOST='the-store.example.biz')
self.assertEqual(response.status_code, 400)
def test_addrequiresvalidsku(self):
from uuid import uuid4
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().post(
'/cart',
{ 'sku': uuid4() },
HTTP_HOST='the-store.example.biz')
self.assertEqual(response.status_code, 400)
def test_productinstore(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
store = self.Store(subdomain='another-store')
store.save()
response = self.Client().post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='another-store.example.biz')
self.assertEqual(response.status_code, 400)
def test_showcart(self):
store = self.Store(subdomain='the-store')
store.save()
response = self.Client().get('/cart', HTTP_HOST='the-store.example.biz')
self.assertEqual(response.status_code, 200)
def test_hasnewcontents(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
client = self.Client()
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
response = client.get(
'/cart',
HTTP_HOST='the-store.example.biz')
self.assertIn(b'Thingy', response.content)
def test_pricesandtotals(self):
store = self.Store(subdomain='the-store')
store.save()
a = self.Product(store=store, name='Thing A', price=123)
a.save()
b = self.Product(store=store, name='Thing B', price=456)
b.save()
client = self.Client()
for product in a, a, b:
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
response = client.get(
'/cart',
HTTP_HOST='the-store.example.biz')
self.assertIn(b'$1.23', response.content)
self.assertIn(b'$2.46', response.content) # == 2 * 1.23
self.assertIn(b'$4.56', response.content)
self.assertIn(b'$7.02', response.content) # == 2 * 1.23 + 4.56
def test_onecartperstore(self):
store1 = self.Store(subdomain='the-store')
store1.save()
a = self.Product(store=store1, name='Thing A', price=123)
a.save()
store2 = self.Store(subdomain='another-store')
store2.save()
b = self.Product(store=store2, name='Thing B', price=456)
b.save()
client = self.Client()
for store, product in (store1, a), (store1, a), (store2, b):
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='{}.example.biz'.format(store))
response = client.get(
'/cart',
HTTP_HOST='the-store.example.biz')
self.assertIn(b'$1.23', response.content)
self.assertNotIn(b'$4.56', response.content)
response = client.get(
'/cart',
HTTP_HOST='another-store.example.biz')
self.assertNotIn(b'$1.23', response.content)
self.assertIn(b'$4.56', response.content)
def test_nocheckoutifempty(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().get('/cart', HTTP_HOST='the-store.example.biz')
for form in self.ElementTree.fromstring(response.content).iter('form'):
if form.attrib['action'].endswith('/checkout'):
self.fail()
def test_checkoutbutton(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
client = self.Client()
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
response = client.get('/cart', HTTP_HOST='the-store.example.biz')
for form in self.ElementTree.fromstring(response.content).iter('form'):
if form.attrib['action'].endswith('/checkout'):
self.assertEqual(form.attrib['method'], 'POST')
break
else:
self.fail()
| gpl-3.0 | 6,249,128,531,331,405,000 | 29.308901 | 80 | 0.564346 | false |
isaacyeaton/global-dyn-non-equil-gliding | Code/script_airfoil_snake.py | 1 | 18626 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 14 14:16:45 2014
%reset -f
%clear
%pylab
%load_ext autoreload
%autoreload 2
@author: isaac
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
from scipy.io import loadmat
# setup better plots
import plots
reload(plots)
from plots import bmap, rcj, tl
import eqns
reload(eqns)
# %% Load in the provided data
data = loadmat('Data/Holden2014/aero_data_mod360.mat')
# get out the data
Clall = data['C_lift'].flatten()
Cdall = data['C_drag'].flatten()
alphaall = data['alpha'].flatten()
# %% "raw" experimental data
idx_exp = np.where((alphaall >= -np.deg2rad(12)) &
(alphaall <= np.deg2rad(61)))[0]
ale = alphaall[idx_exp]
Cle = Clall[idx_exp]
Cde = Cdall[idx_exp]
ClCde = Cle / Cde
#Clprimee = np.gradient(Cle, np.deg2rad(5))
#Cdprimee = np.gradient(Cde, np.deg2rad(5))
Cl_fun = UnivariateSpline(ale, Cle, k=1, s=0)
Cd_fun = UnivariateSpline(ale, Cde, k=1, s=0)
#ClCd_fun = interp1d(ale, ClCde, bounds_error=False)
Clprime_fun = Cl_fun.derivative()
Cdprime_fun = Cd_fun.derivative()
Clprimee = Clprime_fun(ale)
Cdprimee = Cdprime_fun(ale)
# %% "valid" region where date was recorded (-10 to 60 deg aoa)
idx_fit = np.where((alphaall >= -np.deg2rad(12)) &
(alphaall <= np.deg2rad(61)))[0] # was 91
alf = alphaall[idx_fit]
Clf = Clall[idx_fit]
Cdf = Cdall[idx_fit]
ClCdf = Clf / Cdf
#Clprimef = np.gradient(Clf, 5)
#Cdprimef = np.gradient(Cdf, 5)
#s = .005
s = .0001
cl_fun = UnivariateSpline(alf, Clf, s=s, k=2)
cd_fun = UnivariateSpline(alf, Cdf, s=s, k=2)
clprime_fun = cl_fun.derivative()
cdprime_fun = cd_fun.derivative()
# numerically evaluate the spline
al = np.linspace(alf[0], alf[-1], 500)
cl = cl_fun(al)
cd = cd_fun(al)
clprime = clprime_fun(al)
cdprime = cdprime_fun(al)
clcd = cl / cd
# %% Cl, Cd, and ClCd curves for paper (updated)
fig, ax = plt.subplots()
ax.axvline(0, color='gray', lw=1)
ax.axhline(0, color='gray', lw=1)
ax.plot(np.rad2deg(alf), Clf, 'o', ms=6, label=r'$C_L$')
ax.plot(np.rad2deg(alf), Cdf, 's', ms=6, label=r'$C_D$')
ax.plot(np.rad2deg(alf), ClCdf, '^', ms=6, label=r'$C_L/C_D$')
ax.plot(np.rad2deg(al), cl, color=bmap[0], lw=1.5)
ax.plot(np.rad2deg(al), cd, color=bmap[1], lw=1.5)
ax.plot(np.rad2deg(al), clcd, color=bmap[2], lw=1.5)
ax.set_xlim(-15, 65)
ax.set_ylim(-2, 3)
ax.legend(loc='lower right', frameon=False, fontsize=18)
ax.set_xlabel(r'$\alpha$', fontsize=18)
ax.set_ylabel('force coefficients', fontsize=18)
fig.canvas.draw()
# add degree symbol to angles
ticks = ax.get_xticklabels()
newticks = []
for tick in ticks:
text = tick.get_text()
newticks.append(text + u'\u00B0')
ax.set_xticklabels(newticks)
ax.text(5, 2.5, 'airfoil snake', {'fontsize': 18})
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure4b_airfoil_snake.pdf', transparent=True)
# %% Intersections with spline data (for paper about pitch effects)
gamma = al
cgamma = 1 / np.tan(gamma)
pitch_array = np.deg2rad(np.array([-10, 10]))
_gamma_equil = np.deg2rad(np.linspace(10, 70, 1000))
fig, ax = plt.subplots()
ax.plot(np.rad2deg(gamma[gamma > 0]), cgamma[gamma > 0], c=bmap[2], lw=2,
label=r'$\cot{\gamma}$')
for idx, pitch in enumerate(pitch_array):
alpha = gamma + pitch
drag = cd_fun(alpha)
lift = cl_fun(alpha)
ratio = lift / drag
goodidx = np.where((alpha > al[0]) & (alpha < al[-1]))[0]
lb_txt = r'$\theta = {:.0f}$'.format(np.rad2deg(pitch))
lb_txt = lb_txt + u'\u00B0'
_ln, = ax.plot(np.rad2deg(gamma[goodidx]), ratio[goodidx], lw=2,
label=lb_txt, c=bmap[idx])
# find equilibrium points
peq, geq = eqns.pitch_bifurcation([pitch], _gamma_equil, cl_fun, cd_fun,
angle_rng=(al[0], al[-1])).T
aeq = peq + geq
ratio_eq = cl_fun(aeq) / cd_fun(aeq)
_c = _ln.get_color()
ax.plot(np.rad2deg(geq), ratio_eq, 'o', c=_c, mec=_c, ms=9)
# for i in range(len(geq)):
# ax.axvline(np.rad2deg(geq[i]), color=_c)
leg = ax.legend(loc='upper right', frameon=False, fontsize=18)
#ax.set_xlim(np.deg2rad(np.r_[-10, 90]))
ax.set_xlim(0, 60)
ax.set_ylim(0, 3)
ax.set_xlabel(r'$\gamma$, glide angle', fontsize=18)
ax.set_ylabel(r'$C_L/C_D(\gamma + \theta)$', fontsize=18)
fig.canvas.draw()
# add degree symbol to angles
ticks = ax.get_xticklabels()
newticks = []
for tick in ticks:
text = tick.get_text()
newticks.append(text + u'\u00B0')
ax.set_xticklabels(newticks)
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure2_effect_of_pitch.pdf', transparent=True)
# %% Find the glide angle and velocity at equilibrium (pitch of 0 deg)
peq, geq = eqns.pitch_bifurcation([0], _gamma_equil, cl_fun, cd_fun,
angle_rng=(al[0], al[-1])).T
peq, geq = float(peq), float(geq)
veq = eqns.v_equil(geq, cl_fun, cd_fun)
vxeq, vzeq = eqns.vxvz_equil(veq, geq)
cleq, cdeq = cl_fun(geq), cd_fun(geq)
assert np.allclose(np.arctan(cdeq / cleq), geq)
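# Added note: the assert above checks the standard equilibrium condition for
# gliding, tan(gamma*) = Cd/Cl evaluated at the equilibrium angle of attack
# (pitch = 0, so alpha = gamma), i.e. the equilibrium glide angle is set
# entirely by the lift-to-drag ratio.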
# %% Find equilibrium points
pitches = np.deg2rad(np.linspace(-25, 25, 4000))
gammas = np.deg2rad(np.linspace(10, 70, 1000))
sn_angle_rng = (al[0], al[-1])
sn_equil_exp = eqns.pitch_bifurcation(pitches, gammas, Cl_fun, Cd_fun,
angle_rng=sn_angle_rng)
sn_equil_spl = eqns.pitch_bifurcation(pitches, gammas, cl_fun, cd_fun,
angle_rng=sn_angle_rng)
# %% Classify the stability of fixed points
sn_td_exp, sn_ev_exp = eqns.tau_delta(sn_equil_exp, Cl_fun, Cd_fun,
Clprime_fun, Cdprime_fun,
angle_rng=sn_angle_rng)
sn_td_spl, sn_ev_spl = eqns.tau_delta(sn_equil_spl, cl_fun, cd_fun,
clprime_fun, cdprime_fun,
angle_rng=sn_angle_rng)
# %% Classification of fixed points
sn_nuni_exp, sn_uni_exp, sn_class_exp = eqns.classify_fp(sn_td_exp)
sn_nuni_spl, sn_uni_spl, sn_class_spl = eqns.classify_fp(sn_td_spl)
possible_class = ['saddle point', 'unstable focus', 'unstable node',
'stable focus', 'stable node']
bfbmap = [bmap[0], bmap[4], bmap[2], bmap[3], bmap[1]]
# %% Acceleration along terminal manifold when we have a saddle point
sad_idx = np.where(sn_class_spl == 'saddle point')[0]
sad_pitch, sad_gamma = sn_equil_spl[sad_idx].T
# we have some double saddle points below theta =2 deg; remove these
sad_idx = np.where(sad_pitch >= np.deg2rad(2))[0]
sad_pitch, sad_gamma = sad_pitch[sad_idx], sad_gamma[sad_idx]
sad_aoa = sad_pitch + sad_gamma
dcl_fun = cl_fun.derivative()
ddcl_fun = dcl_fun.derivative()
dcd_fun = cd_fun.derivative()
ddcd_fun = dcd_fun.derivative()
# 2nd order spline, needs more to get higher derivatives
#dddcl_fun = ddcl_fun.derivative()
#dddcd_fun = ddcd_fun.derivative()
# evaluate force coefficients at the saddle
sad_cl = cl_fun(sad_aoa)
sad_dcl = dcl_fun(sad_aoa)
sad_ddcl = ddcl_fun(sad_aoa)
sad_ddcl = np.zeros_like(sad_aoa)
sad_cd = cd_fun(sad_aoa)
sad_dcd = dcd_fun(sad_aoa)
sad_ddcd = ddcd_fun(sad_aoa)
sad_dddcd = np.zeros_like(sad_aoa)
# place the values in a large array for export
sad_angles = np.c_[np.rad2deg(sad_pitch), sad_pitch, sad_gamma, sad_aoa]
sad_lift = np.c_[sad_cl, sad_dcl, sad_ddcl, sad_ddcl]
sad_drag = np.c_[sad_cd, sad_dcd, sad_ddcd, sad_ddcd]
sad_export = np.c_[sad_angles, sad_lift, sad_drag]
# %%
# save the data
import pandas as pd
node_idx = np.where(sn_class_spl == 'stable node')[0]
node_pitch, node_gamma = sn_equil_spl[node_idx].T
# nodes, select ones with saddles
node_idx_with_saddles = np.where(np.in1d(node_pitch, sad_pitch))[0]
node_pitch = node_pitch[node_idx_with_saddles]
node_gamma = node_gamma[node_idx_with_saddles]
# do the reverse to ensure we have the same number of values
sad_idx_with_nodes = np.where(np.in1d(sad_pitch, node_pitch))[0]
# too many indices...
node_idx_with_saddles = []
for i in np.arange(len(sad_pitch)):
s_pitch = sad_pitch[i]
idx = np.where(node_pitch == s_pitch)[0]
if len(idx) == 0:
continue
elif len(idx) == 1:
node_idx_with_saddles.append(idx)
elif len(idx) > 1:
for ii in np.arange(len(idx)):
node_idx_with_saddles.append(idx[ii])
node_idx_with_saddles = np.array(node_idx_with_saddles)
# %% Spline bifurcation plot (deg) for paper
rd = np.rad2deg
gam_high = sn_angle_rng[0] - pitches # closer to 0
gam_low = sn_angle_rng[1] - pitches # closer to 90
fig, ax = plt.subplots()
ax.fill_between(rd(pitches), rd(gam_high), 0, color='gray', alpha=.1, lw=0)
ax.fill_between(rd(pitches), rd(gam_low), 60, color='gray', alpha=.1, lw=0)
ax.axvline(0, color='gray')
ax.axvline(5, color='gray')
for ii, fp_kind in enumerate(possible_class):
idx = np.where(sn_class_spl == fp_kind)[0]
if len(idx) > 0:
ax.plot(rd(sn_equil_spl[idx, 0]), rd(sn_equil_spl[idx, 1]), 'o',
c=bfbmap[ii], ms=2.5, label=fp_kind)
_leg = ax.legend(loc='lower left', markerscale=3, fancybox=True, framealpha=.75,
frameon=True, fontsize=16)
_leg.get_frame().set_color('w')
ax.set_xlim(-15, 15)
ax.set_ylim(60, 0)
#ax.set_ylabel(r'$\gamma^*$, equilibrium glide angle', fontsize=18)
#ax.set_xlabel(r'$\theta$, pitch angle', fontsize=18)
ax.set_ylabel(r'$\gamma^*$ ', fontsize=18, rotation=0)
ax.set_xlabel(r'$\theta$', fontsize=18)
ax.text(-13, 5, 'airfoil snake', {'fontsize': 18})
fig.canvas.draw()
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure6b_bifurcation_airfoil_snake.pdf',
transparent=True)
# %% Velocity polar diagram, pitch = 0
afdict = dict(cli=cl_fun, cdi=cd_fun, clip=clprime_fun, cdip=cdprime_fun)
pitch = 0
arng = sn_angle_rng
extrap = (ale[0], ale[-1])
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
tvec = np.linspace(0, 30, 351)
reload(plots)
from plots import phase_plotter as ppr
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=None, ax=None)
lab = 'airfoil snake, ' + r'$\theta=$0' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5bi_vpd0_airfoil_snake.pdf', transparent=True)
# %% Velocity polar diagram, pitch = 0 with Z nullcline
afdict = dict(cli=cl_fun, cdi=cd_fun, clip=clprime_fun, cdip=cdprime_fun)
pitch = 0
arng = sn_angle_rng
extrap = (ale[0], ale[-1])
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
tvec = np.linspace(0, 30, 351)
reload(plots)
from plots import phase_plotter as ppr
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, nullcline_z=True,
fig=None, ax=None)
lab = 'airfoil snake, ' + r'$\theta=$0' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5bi_vpd0_nullcline_airfoil_snake.pdf',
transparent=True)
# %% Velocity polar diagram, pitch = 5
pitch = np.deg2rad(5)
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=None, ax=None)
lab = 'airfoil snake, ' + r'$\theta=$5' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5bii_vpd5_airfoil_snake.pdf', transparent=True)
# %% Velocity polar diagram, pitch = 5, with manifold approximations
man_folder = './Data/airfoil snake manifold/'
man_2 = np.genfromtxt(man_folder + 'manifold_2nd_order.csv', delimiter=',')
man_3 = np.genfromtxt(man_folder + 'manifold_3rd_order.csv', delimiter=',')
vx_2, vz_2 = man_2.T
vx_3, vz_3 = man_3.T
pitch = np.deg2rad(5)
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=None, ax=None)
ax.plot(vx_2, vz_2, c=bmap[2], label='2nd-order approx.')
ax.plot(vx_3, vz_3, c=bmap[3], label='3rd-order approx.')
ax.legend(loc='lower right', frameon=True)
ax.set_xlim(.55, .8)
ax.set_ylim(-.525, -.275)
fig.savefig('Figures/figure5bii_inset_vpd5_airfoil_snake.pdf',
transparent=False)
# %% Supplement figure - Acclerations along the terminal velocity manifold
gam_2 = -np.arctan(vz_2 / vx_2)
gam_3 = -np.arctan(vz_3 / vx_3)
ptc_2 = np.deg2rad(5)
ptc_3 = np.deg2rad(5)
aoa_2 = gam_2 + ptc_2
aoa_3 = gam_3 + ptc_3
cl_2 = cl_fun(aoa_2)
cd_2 = cd_fun(aoa_2)
cl_3 = cl_fun(aoa_3)
cd_3 = cd_fun(aoa_3)
ax_2, az_2 = eqns.cart_eqns(vx_2, vz_2, cl_2, cd_2)
ax_3, az_3 = eqns.cart_eqns(vx_3, vz_3, cl_3, cd_3)
a_2 = np.sqrt(ax_2**2 + az_2**2)
a_3 = np.sqrt(ax_3**2 + az_3**2)
xx_2 = np.arange(len(a_2))
xx_3 = np.arange(len(a_3))
# arbitrary shift the indices for plotting; saddle at zero, stable node at 1
xx_2 = (xx_2 - 150) / 150
xx_3 = (xx_3 - 150) / 150
fig, ax = plt.subplots()
ax.axhline(.1, color=bmap[3], lw=1, label='low acceleration contour')
ax.axvline(0, color=bmap[0], lw=1, ls='--', label='location of saddle point')
ax.axvline(.93, color=bmap[1], lw=1, ls='--', label='location of stable node')
ax.plot(xx_2, a_2, c=bmap[2], lw=2, label='2nd order approx.')
ax.plot(xx_3, a_3, c=bmap[3], lw=2, label='3rd order approx.')
ax.legend(loc='upper left', frameon=True)
ax.set_xlabel('distance along terminal velocity manifold')
ax.set_ylabel('acceleration magnitude')
rcj(ax)
tl(fig)
fig.savefig('Figures/figure_SI_acceleration_along_manifold.pdf',
transparent=True)
# %% Figure 1 - show how VPD differs from time series approach
pitch = 0
ts = np.linspace(0, 30, 351)
vxseed, vzseed = np.r_[.4], np.r_[0]
odeargs = (pitch, cl_fun, cd_fun)
for i in range(len(vxseed)):
x0 = (0, 0, vxseed[i], vzseed[i])
soln = plots.ps_traj(x0, ts, odeargs, eqns.cart_model, arng,
vxlim, vzlim)
ntime = len(ts)
# unpack values
xs, zs, vxs, vzs = soln.T
gs = eqns.calc_gamma(vxs, vzs)
# just plot once the glide angle derivative is slow
idx = np.where(np.abs(np.gradient(gs)) >= 1e-4)[0]
xs, zs = xs[idx], zs[idx]
vxs, vzs = vxs[idx], vzs[idx]
gs = gs[idx]
ts = ts[idx]
accxs, acczs = np.zeros(len(ts)), np.zeros(len(ts))
for k in np.arange(len(ts)):
x0 = (xs[k], zs[k], vxs[k], vzs[k])
_, _, accxs[k], acczs[k] = eqns.cart_model(x0, ts[k], odeargs)
vmag = np.sqrt(vxs**2 + vzs**2)
accmag = np.sqrt(accxs**2 + acczs**2)
i0 = gs.argmax()
np.where(accmag <= 0.1)[0]
i1 = 15
i2 = 139
i3 = 147
ii = np.r_[i1, i2, i3]
ii = np.r_[140] # , 147] # end of bump in acceleration
# %% Plot time histories
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, sharex=True, figsize=(4.2, 8))
ax1.axhline(0, color='gray')
ax2.axhline(0, color='gray')
ax3.axhline(0, color='gray')
ax4.axhline(gs[-1], color='gray', ls=':')
lw = 1.5
ax1.plot(ts, xs, 'k', lw=lw)
ax1.plot(ts, zs, 'k--', lw=lw)
ax2.plot(ts, vxs, 'k', lw=lw)
ax2.plot(ts, vzs, 'k--', lw=lw)
ax3.plot(ts, accxs, 'k', label='horizontal', lw=lw)
ax3.plot(ts, acczs, 'k--', label='vertical', lw=lw)
ax4.plot(ts, gs, 'k', lw=lw)
# plot velocity and acceleration magnitudes
# ax3.plot(ts, accmag, 'k:', label='magnitude', lw=lw)
# ax2.plot(ts, vmag, 'k:', lw=lw)
kwargs = dict(marker='o', ms=7, mfc=None, mec='gray', mew=1, fillstyle='none')
ax1.plot(ts[i0], xs[i0], 'o', ms=7, c='gray')
ax1.plot(ts[i0], zs[i0], 'o', ms=7, c='gray')
ax1.plot(ts[ii], xs[ii], **kwargs)
ax1.plot(ts[ii], zs[ii], **kwargs)
ax2.plot(ts[i0], vxs[i0], 'o', ms=7, c='gray')
ax2.plot(ts[i0], vzs[i0], 'o', ms=7, c='gray')
ax2.plot(ts[ii], vxs[ii], **kwargs)
ax2.plot(ts[ii], vzs[ii], **kwargs)
ax3.plot(ts[i0], accxs[i0], 'o', ms=7, c='gray')
ax3.plot(ts[i0], acczs[i0], 'o', ms=7, c='gray')
ax3.plot(ts[ii], accxs[ii], **kwargs)
ax3.plot(ts[ii], acczs[ii], **kwargs)
ax4.plot(ts[i0], gs[i0], 'o', ms=7, c='gray')
ax4.plot(ts[ii], gs[ii], **kwargs)
ax3.legend(loc='lower right', fontsize=18)
for ax in [ax1, ax2, ax3, ax4]:
ax.set_yticks([])
ax.set_xticks([])
ttext = .5
ax1.text(ttext, .9 * np.r_[xs, zs].max(), 'position', fontsize=18)
ax2.text(ttext, .9 * np.r_[vxs, vzs].max(), 'velocity', fontsize=18)
ax3.text(ttext, .9 * np.r_[accxs, acczs].max(), 'acceleration',
fontsize=18)
ax4.text(ttext, .85 * np.pi / 2, 'glide angle', fontsize=18)
ax4.set_xlabel('time', fontsize=18)
#ax1.set_ylabel('position', fontsize=18)
#ax2.set_ylabel('velocity', fontsize=18)
#ax3.set_ylabel('acceleration', fontsize=18)
#ax4.set_ylabel('glide angle', fontsize=18)
ax4.set_xlim(0, ts[-1])
ax4.set_ylim(0, np.pi / 2)
rcj(ax1)
rcj(ax2)
rcj(ax3)
rcj(ax4)
tl(fig)
fig.savefig('Figures/1abcd_time_histories.pdf', transparent=True)
# %% Plot x-z space
skip = 10
fig, ax = plt.subplots(figsize=(4.2, 4.))
ax.plot(xs, zs, 'k-x', lw=1.5, markevery=skip, mew=.75)
ax.plot(xs[i0], zs[i0], 'o', ms=7, c='gray')
ax.plot(xs[ii], zs[ii], **kwargs)
ax.set_xlabel(r'$x$', fontsize=20)
ax.set_ylabel(r'$z$ ', rotation=0, fontsize=20)
ax.set_yticks([])
ax.set_xticks([])
ax.set_aspect('equal', adjustable='box')
ax.margins(0, .03)
rcj(ax)
tl(fig)
fig.savefig('Figures/1e_position_space.pdf', transparent=True)
# %% Plot velocity polar diagram
afdict = dict(cli=cl_fun, cdi=cd_fun, clip=clprime_fun, cdip=cdprime_fun)
arng = sn_angle_rng
extrap = (ale[0], ale[-1])
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
tvec = np.linspace(0, 30, 351)
reload(plots)
from plots import phase_plotter as ppr
fig, ax = plt.subplots(figsize=(4.2, 4))
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=fig, ax=ax, acc_contour=True)
ax.plot(vxs, vzs, 'kx-', lw=1.5, markevery=skip, mew=.75, ms=5)
ax.plot(vxs[i0], vzs[i0], 'o', ms=7, c='gray')
ax.plot(vxs[ii], vzs[ii], **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$v_x$', fontsize=20)
ax.set_ylabel(r'$v_z$ ', fontsize=20, rotation=0)
fig.savefig('Figures/1f_velocity_space.pdf', transparent=True)
| mit | -5,088,139,220,984,201,000 | 26.925037 | 80 | 0.633953 | false |
brain-tec/partner-contact | partner_multi_relation_tabs/tests/test_tab.py | 1 | 1766 | # Copyright 2018 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import common
from ..tablib import Tab
class TestTab(common.TestCommon):
def test_create_page(self):
self.assertTrue(bool(self.tab_board))
tab_obj = Tab(self.tab_board)
page = tab_obj.create_page()
# And we should have a field for (amongst others) selection_type_id.
field = page.xpath('//field[@name="type_selection_id"]')
self.assertTrue(field, 'Field selection_type_id not in page.')
def test_visibility(self):
"""Tab positions should be shown for functionaries, but not others."""
self.assertTrue(bool(self.tab_positions))
self.assertTrue(bool(self.partner_important_person))
self.assertTrue(bool(self.partner_common_person))
tab_obj = Tab(self.tab_positions)
self.assertTrue(
tab_obj.compute_visibility(self.partner_important_person),
'Positions tab should be visible for functionary.')
self.assertFalse(
tab_obj.compute_visibility(self.partner_common_person),
'Positions tab should not be visible for non-functionary.')
# Tab for departments should only be visible for main partner
self.assertTrue(bool(self.tab_departments))
self.assertTrue(bool(self.partner_big_company))
tab_obj = Tab(self.tab_departments)
self.assertTrue(
tab_obj.compute_visibility(self.env.ref('base.main_partner')),
'Department tab should be visible for main partner.')
self.assertFalse(
tab_obj.compute_visibility(self.partner_big_company),
'Department tab should not be visible for other partners.')
| agpl-3.0 | 360,778,801,671,219,900 | 44.282051 | 78 | 0.660815 | false |
bb111189/CryptoKnocker | CryptoKnocker/CryptoKnocker/settings.py | 1 | 2239 | """
Django settings for CryptoKnocker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+6itzn97vm^deyw1c!g8h(i(u1pu%fg-^_vj*kabc#t_lqbd-7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.db',
'django.forms',
'mainpage',
'management',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'CryptoKnocker.urls'
WSGI_APPLICATION = 'CryptoKnocker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
MEDIA_ROOT = os.path.join(os.getcwd())
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(os.getcwd(), 'static'),
)
#view path
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
| mit | 2,499,307,004,532,393,000 | 23.075269 | 71 | 0.714605 | false |
peragro/peragro-at | src/damn_at/metadatastore.py | 1 | 2111 | """
The MetaDataStore handler.
"""
from __future__ import absolute_import
import os
from damn_at.utilities import is_existing_file, pretty_print_file_description
from damn_at.bld import hash_to_dir
from damn_at.serialization import SerializeThriftMsg, DeserializeThriftMsg
from damn_at import FileDescription
from io import open
class MetaDataStoreException(Exception):
"""Base MetaDataStore Exception"""
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.msg = msg
def __str__(self):
return repr(self.msg)
class MetaDataStoreFileException(MetaDataStoreException):
"""Something wrong with the file"""
pass
class MetaDataStore(object):
"""
A filesystem MetaDataStore implementation.
"""
def __init__(self, store_path):
self.store_path = store_path
if not os.path.exists(self.store_path):
os.makedirs(self.store_path)
def is_in_store(self, store_id, an_hash):
"""
Check if the given file hash is in the store.
"""
return is_existing_file(os.path.join(self.store_path, hash_to_dir(an_hash)))
def get_metadata(self, store_id, an_hash):
"""
Get the FileDescription for the given hash.
"""
try:
with open(os.path.join(self.store_path, hash_to_dir(an_hash)), 'rb') as metadata:
a_file_descr = DeserializeThriftMsg(FileDescription(), metadata.read())
return a_file_descr
except IOError as ioe:
raise MetaDataStoreFileException('Failed to open FileDescription with hash %s' % an_hash, ioe)
def write_metadata(self, store_id, an_hash, a_file_descr):
"""
Write the FileDescription to this store.
"""
data = SerializeThriftMsg(a_file_descr)
path = os.path.join(self.store_path, hash_to_dir(an_hash))
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb') as metadata:
metadata.write(data)
return a_file_descr
| bsd-3-clause | 1,144,821,180,542,708,000 | 30.984848 | 106 | 0.636191 | false |
moyaproject/moya | moya/settings.py | 1 | 10368 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from .containers import OrderedDict
from . import iniparse
from .compat import text_type, string_types, PY2, implements_to_string, implements_bool
from . import errors
from .tools import textual_list
from fs.path import dirname, join, normpath, relpath
import io
import os
def read_settings(fs, path):
with fs.safeopen(path, "rb") as settings_file:
cfg = iniparse.parse(settings_file)
return cfg
@implements_to_string
class SettingsKeyError(KeyError):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class SettingsContainer(OrderedDict):
@classmethod
def apply_master(self, master, settings):
for section_name, section in master.items():
if section_name == "service":
continue
if section_name in settings:
settings[section_name].update(section)
else:
settings[section_name] = section
@classmethod
def read(cls, fs, path, master=None):
visited = []
if not isinstance(path, string_types):
for p in path:
if fs.isfile(p):
path = p
break
else:
raise errors.SettingsError(
"""settings file not found (looked for {} in {})""".format(
textual_list(path, join_word="and"), fs
)
)
settings_stack = []
while 1:
path = relpath(normpath(path))
if path in visited:
raise errors.SettingsError(
"""recursive extends detected, "{}" has already been extended""".format(
path
)
)
with fs.open(path, "rt") as settings_file:
s = iniparse.parse(
settings_file,
SettingsContainer(),
section_class=SettingsSectionContainer,
)
visited.append(path)
settings_stack.append(s)
if "extends" in s[""]:
# path = s['']['extends']
path = join(dirname(path), s[""]["extends"])
else:
break
settings_stack = settings_stack[::-1]
settings = settings_stack[0]
s = cls.__class__(settings_stack[0])
for s in settings_stack[1:]:
for section_name, section in s.items():
if section_name in settings:
settings[section_name].update(section)
else:
settings[section_name] = section
if master is not None:
cls.apply_master(master, settings)
return settings
@classmethod
def read_os(cls, path):
visited = []
settings_stack = []
while 1:
path = os.path.abspath(os.path.normpath(path))
if path in visited:
raise errors.SettingsError(
"""recursive extends detected, "{}" has already been extended""".format(
path
)
)
with io.open(path, "rt") as settings_file:
s = iniparse.parse(
settings_file,
SettingsContainer(),
section_class=SettingsSectionContainer,
)
visited.append(path)
settings_stack.append(s)
if "extends" in s[""]:
path = s[""]["extends"]
else:
break
settings_stack = settings_stack[::-1]
settings = settings_stack[0]
s = cls.__class__(settings_stack[0])
for s in settings_stack[1:]:
for section_name, section in s.items():
if section_name in settings:
settings[section_name].update(section)
else:
settings[section_name] = section
return settings
@classmethod
def read_from_file(self, settings_file):
"""Reads settings, but doesn't do any extends processing"""
settings = iniparse.parse(
settings_file, SettingsContainer(), section_class=SettingsSectionContainer
)
return settings
@classmethod
def from_dict(self, d):
return SettingsSectionContainer((k, SettingContainer(v)) for k, v in d.items())
@classmethod
def create(cls, **kwargs):
return cls.from_dict(kwargs)
def export(self, output_file, comments=None):
"""Write the settings to an open file"""
ini = iniparse.write(self, comments=comments)
output_file.write(ini)
def copy(self):
return SettingsContainer(self)
def __getitem__(self, key):
try:
return super(SettingsContainer, self).__getitem__(key)
except KeyError:
return EmptySettings()
def get(self, section_name, key, default=Ellipsis):
if section_name not in self:
if default is Ellipsis:
raise SettingsKeyError(
"required section [%s] not found in settings" % section_name
)
else:
return default
section = self[section_name]
if key not in section:
if default is Ellipsis:
raise SettingsKeyError(
"key '%s' not found in section [%s]" % (key, section_name)
)
else:
return default
return section[key]
def set(self, section_name, key, value):
if section_name not in self:
self[section_name] = SettingsSectionContainer()
self[section_name][key] = value
def get_bool(self, section_name, key, default=False):
value = self.get(section_name, key, "yes" if default else "no")
return value.strip().lower() in ("yes", "true")
def get_list(self, section_name, key, default=""):
value = self.get(section_name, key, default=default)
return [line.strip() for line in value.splitlines() if line.strip()]
def get_int(self, section_name, key, default=None):
value_text = self.get(section_name, key, None)
if value_text is None or not value_text.strip():
return None
try:
value = int(value_text)
except:
raise SettingsKeyError(
"key [{}]/{} should be empty or an integer value (not '{}')".format(
section_name, key, value_text
)
)
else:
return value
def __moyaconsole__(self, console):
from console import Cell
table = [(Cell("key", bold=True), Cell("value", bold=True))]
table += sorted(self.items())
console.table(table)
class SettingsSectionContainer(OrderedDict):
def get_int(self, key, default=None):
if key not in self:
return default
value = int(self[key])
return value
def get_bool(self, key, default=False):
value = self.get(key, "yes" if default else "no")
return value.strip().lower() in ("yes", "true")
def get_list(self, key, default=""):
value = self.get(key, default)
return [line.strip() for line in value.splitlines() if line.strip()]
def __moyaconsole__(self, console):
from console import Cell
table = [(Cell("key", bold=True), Cell("value", bold=True))]
table += sorted(self.items())
console.table(table)
def __setitem__(self, key, value):
value = SettingContainer(text_type(value))
super(SettingsSectionContainer, self).__setitem__(key, value)
@implements_bool
class EmptySettings(object):
def __getitem__(self, key):
if key == "list":
return []
if key == "bool":
return False
if key == "int":
return 0
return ""
def __repr__(self):
return "<emptysettings>"
def get_int(self, key, default=None):
return default
def get_bool(self, key, default=False):
return default
def get_list(self, key, default=""):
return default
def get(self, key, default=None):
return default
def __bool__(self):
return False
def __unicode__(self):
return ""
def __iter__(self):
return iter([])
def items(self):
return []
def __moyaconsole__(self, console):
from console import Cell
table = [(Cell("key", bold=True), Cell("value", bold=True))]
console.table(table)
@implements_to_string
class SettingContainer(text_type):
def __init__(self, setting_text):
if PY2:
super(SettingContainer, self).__init__(setting_text)
else:
super().__init__()
# self.raw = setting_text
self.setting_text = setting_text.strip()
self.lines = [line.strip() for line in setting_text.splitlines()] or []
self.first = self.lines[0] if self.lines else ""
self.bool = self.setting_text.lower() in ("yes", "true")
try:
self.int = int(self.setting_text)
except ValueError:
self.int = None
try:
self.float = float(self.setting_text)
except ValueError:
self.float = None
def __str__(self):
return self.setting_text
def __getitem__(self, index):
if isinstance(index, string_types):
if index == "list":
return self.lines
elif index == "bool":
return self.bool
elif index == "int":
return self.int
elif index == "float":
return self.float
return self.lines[index]
def __eq__(self, other):
return self.first == other
def __ne__(self, other):
return self.first != other
if not PY2:
def __hash__(self):
return super().__hash__()
if __name__ == "__main__":
settings = SettingsContainer()
print(settings["nothere"])
s = SettingContainer("foo\nbar")
print(s == "foo")
print(s["list"])
| mit | -4,349,579,899,935,977,000 | 29.404692 | 92 | 0.53559 | false |
Macainian/BaseDjangoProject | website/management/commands/create_admin_account_migration.py | 1 | 1751 | import os
from django.core.management.base import BaseCommand
from website.settings import BASE_DIR
class Command(BaseCommand):
def handle(self, *args, **options):
migrations_folder = os.path.join(BASE_DIR, "website", "migrations")
admin_account_migration_text = self.get_admin_account_migration_text()
# Create BrowseView.py
with open(os.path.join(migrations_folder, "0001_initial.py"), "w+") as admin_account_migration_file:
admin_account_migration_file.write(admin_account_migration_text)
def get_admin_account_migration_text(self):
# The string below is specifically formatted this way to ensure that it looks correct on the actual file
# since we are using """
return \
"""from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from website.apps.staff_member_manager.models import StaffMember
def add_staff_members(apps, schema_editor):
if not StaffMember.objects.filter(user__username="admin").exists():
user = User.objects.create(username="admin")
staff_member = StaffMember.objects.create(user=user)
staff_member.user.is_staff = True
staff_member.user.is_active = True
staff_member.user.is_superuser = True
staff_member.user.set_password("1")
staff_member.generated_password = ""
staff_member.user.first_name = "System"
staff_member.user.last_name = "Superuser"
staff_member.user.save()
staff_member.save()
class Migration(migrations.Migration):
dependencies = [
("staff_member_manager", "0001_initial"),
]
operations = [
migrations.RunPython(add_staff_members),
]
""" | mit | -4,347,252,034,574,000,600 | 32.692308 | 112 | 0.682467 | false |
rpdillon/wikid | wikid/__init__.py | 1 | 3539 | #!/usr/bin/env python
# wikid, Copyright (c) 2010, R. P. Dillon <[email protected]>
# This file is part of wikid.
#
# wikid is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
wikid - A Mercurial extension that provides a distributed wiki backed
by Merurial.
The intended use is to provide a distribute Wiki capability using
existing adopted technologies. hgwiki makes use of:
- mercurial for revision control
- web.py for templating and serving the web interface
- DocUtils' implementation of reStructuredText for markup
'''
import web
from wikid import WikiContent, ReadNode, EditNode, PageIndex, DeleteNode
from wikid import NodeHistory, StaticLibs, RecoverNode, PrintNode, Help, Upload, FileManagement
from wikid import getExtensionPath
def wikid(ui, repo, **opts):
"""
Invokes web.py to serve content using the WikiContentEngine.
"""
# Maps the VFS directory locations to the names of the classes
# that handle those requests.
urls = (
'/PageIndex', 'PageIndex',
'/upload', 'Upload',
'/Help', 'Help',
'/files/(.*)', 'FileManagement',
'/history/(.*)', 'NodeHistory',
'/delete/(.*)', 'DeleteNode',
'/recover/(.*)', 'RecoverNode',
'/edit/(.*)', 'EditNode',
'/lib/(.*)', 'StaticLibs',
'/print/(.*)', 'PrintNode',
'/(.*)', 'ReadNode'
)
from mercurial import hg
####
# Path modifications
import sys
# Because the Mercurial installer under Windows does not provide
# the necessary scaffolding to support web.py, we patch the system
# path with the locations of a Python installation
sys.path.append("C:\\Python26\\lib")
# wikid includes some necessary libraries (like DocUtils), so we
# add its path to the system path as well.
sys.path.append(getExtensionPath())
####
# Set the default revision, if none is specified
if opts['rev'] == '':
rev = repo.changelog.nodemap[repo.changelog.tip()]
else:
rev = opts['rev']
#Set up the content engine
WikiContent.setUi(ui)
WikiContent.setRepo(repo)
WikiContent.setRev(rev)
app = web.application(urls, globals())
# Hack to avoid web.py parsing mercurial's command-line args
sys.argv = ['wikid', opts['port']] # Hack to avoid web.py parsing mercurial's command-line args
# Fire off the web.py (CherryPy) server
app.run()
# Specifies the command line parameters and brief descriptions. For
# basic usage, navigate to the repository containing the wiki data and
# simply type:
#
# hg wikid
#
# To set the port, use something like:
#
# hg wikid -p 9000
#
# To set the revision you want to serve:
#
# hg wikid -r 2
cmdtable = {
"wikid": (wikid,
[('r', 'rev', '', 'The revision of the repository to serve.'),
('p', 'port', '8080', 'The port on which to serve.')],
"hg wikid [options]")
}
| gpl-3.0 | -6,462,069,072,412,644,000 | 31.768519 | 99 | 0.658378 | false |
ngokevin/zamboni | mkt/translations/tests/test_helpers.py | 1 | 4925 | from django.conf import settings
from django.utils import translation
import jingo
from mock import Mock, patch
from nose.tools import eq_
import amo
import amo.tests
from mkt.translations import helpers
from mkt.translations.fields import save_signal
from mkt.translations.models import PurifiedTranslation
from mkt.translations.tests.testapp.models import TranslatedModel
from mkt.webapps.models import Webapp
def super():
jingo.load_helpers()
def test_locale_html():
"""Test HTML attributes for languages different than the site language"""
testfield = Mock()
# same language: no need for attributes
this_lang = translation.get_language()
testfield.locale = this_lang
s = helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
# non-rtl language
testfield.locale = 'de'
s = helpers.locale_html(testfield)
eq_(s, ' lang="de" dir="ltr"')
# rtl language
for lang in settings.RTL_LANGUAGES:
testfield.locale = lang
s = helpers.locale_html(testfield)
eq_(s, ' lang="%s" dir="rtl"' % testfield.locale)
def test_locale_html_xss():
"""Test for nastiness-removal in the transfield's locale"""
testfield = Mock()
# same language: no need for attributes
testfield.locale = '<script>alert(1)</script>'
s = helpers.locale_html(testfield)
assert '<script>' not in s
assert '<script>alert(1)</script>' in s
def test_empty_locale_html():
"""locale_html must still work if field is None."""
s = helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(6) }}').render({'s': t})
eq_(actual, s)
def test_truncate_purified_field_xss():
"""Truncating should not introduce xss issues."""
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(100) }}').render({'s': t})
eq_(actual, 'safe <script>alert("omg")</script>')
actual = jingo.env.from_string('{{ s|truncate(5) }}').render({'s': t})
eq_(actual, 'safe ...')
def test_l10n_menu():
# No remove_locale_url provided.
menu = helpers.l10n_menu({})
assert 'data-rm-locale=""' in menu, menu
# Specific remove_locale_url provided (eg for user).
menu = helpers.l10n_menu({}, remove_locale_url='/some/url/')
assert 'data-rm-locale="/some/url/"' in menu, menu
# Use the remove_locale_url taken from the addon in the context.
menu = helpers.l10n_menu({'addon': Webapp()},
remove_locale_url='some/url/')
assert 'data-rm-locale="/developers/app/None/rmlocale"' in menu, menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(amo.tests.TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
eq_(helpers.all_locales(addon, field_name), None)
addon = Mock()
field_name = 'description'
del addon.description
eq_(helpers.all_locales(addon, field_name), None)
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = helpers.all_locales(obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result
| bsd-3-clause | 3,741,023,886,050,559,500 | 33.683099 | 78 | 0.633503 | false |
jjdmol/LOFAR | CEP/Pipeline/framework/lofarpipe/support/xmllogging.py | 1 | 5594 | """
xml based logging constructs and helpers functions
"""
import xml.dom.minidom as _xml
def add_child(head, name):
"""
Create a node with name. And append it to the node list of the supplied
node. (This function allows duplicate head names as specified by xml)
return the create node.
"""
local_document = _xml.Document()
created_node = local_document.createElement(name)
head.appendChild(created_node)
return created_node
def get_child(node, name):
"""
Return the first direct descendant (child) of the supplied node with
the tagname name. The default xml getchild also looks in child nodes.
Return None if no match is found
"""
for child in node.childNodes:
if child.nodeName == name:
return child
return None
def get_active_stack(calling_object, stack_name="active_stack"):
"""
returns the active stack on the current class
return None of it is not present
"""
if hasattr(calling_object, stack_name):
stack_node = calling_object.__getattribute__(stack_name)
if stack_node.getAttribute("type") == "active_stack":
return stack_node
return None
def add_child_to_active_stack_head(calling_object, child,
stack_name="active_stack"):
"""
Add the supplied child to the current active node in the active stack.
returns the added child on succes, None if not active stack was found.
Selection between active stacks can be done with the stack_name argument
"""
active_stack = get_active_stack(calling_object, stack_name="active_stack")
if not active_stack == None:
active_stack_node = get_child(active_stack, stack_name)
last_child = active_stack_node.lastChild
if last_child != None:
last_child.appendChild(child)
return child
return None
def enter_active_stack(calling_object, child,
stack_name="active_stack", comment=None):
"""
This function adds stack-like behaviour to an object:
On a 'fresh' class an xml node is added as a class attribute. This node
performs stack functionality and allows nested adding of nodes to track
functionality.
If the function is called on a class with an active_stack already present
a nested node is added.
The current nesting is book kept in the active stack. Past calls are
saved for logging purposes.
The comment argument allows adding extra info to a node
"""
active_stack_node = None
stack_node = None
# Check if the calling object has a active stack node with
# name == stack_name
if not hasattr(calling_object, stack_name):
# Create the xml node if it not exists
_throw_away_document = _xml.Document()
stack_node = \
_throw_away_document.createElement(stack_name)
# The xml name of the object is the calling object
stack_node.setAttribute("Name", calling_object.__class__.__name__)
stack_node.setAttribute("type", "active_stack")
# assign the node to the calling class as an attribute
calling_object.__setattr__(stack_name, stack_node)
# add the 'call stack'
active_stack_node = add_child(stack_node, stack_name) # generiek
else:
stack_node = calling_object.__getattribute__(stack_name)
# Find the active stack
active_stack_node = get_child(stack_node, stack_name)
if active_stack_node == None:
active_stack_node = add_child(stack_node, stack_name)
if comment != None:
stack_node.setAttribute("comment", comment)
active_stack_node.setAttribute("info",
"Contains functions not left with a return")
# if child is a string add a xml node with this name
stacked_child = None
if isinstance(child, basestring):
stacked_child = add_child(active_stack_node, child)
# else try adding it as a node
elif isinstance(child, _xml.Node):
active_stack_node.appendChild(child)
stacked_child = child
return stacked_child
def exit_active_stack(calling_object, stack_name="active_stack"):
"""
Mirror function to enter_active_stack.
Performs bookkeeping after leaving a stack:
Add the left node a child of the current active node.
If this is the last active node move it to the 'inactive node' list
"""
# get the named active stack node
if not hasattr(calling_object, stack_name):
raise ValueError(
"Tried leaving an active-stack which"
" has not been entered: stack_name={0} does not exist".format(
stack_name))
active_stack_node = calling_object.__getattribute__(
stack_name)
# get the active stack
active_stack = None
for child_node in active_stack_node.childNodes:
if child_node.nodeName == stack_name:
active_stack = child_node
break
# Get the current last item in the stack
last_child = active_stack.lastChild
# remove it
active_stack.removeChild(last_child)
# Now 'log' the now 'finished' step
if active_stack.lastChild == None:
# add to the main time_logger node
active_stack_node.appendChild(last_child)
else:
# add to the calling node info
active_stack.lastChild.appendChild(last_child)
| gpl-3.0 | -4,474,977,738,795,691,000 | 35.046358 | 78 | 0.63157 | false |
markstoehr/structured_gaussian_mixtures | lstm_gaussian_mixtures/test_negative_loglikelihood.py | 1 | 2280 | %autoindent
import numpy
import theano
from theano import tensor
def numpy_floatX(data):
return numpy.asarray(data, dtype=theano.config.floatX)
num_timesteps = 10
num_sequences = 3
num_dim = 2
num_components = 3
x_n = (numpy.arange(num_timesteps * num_sequences * num_dim,
dtype=theano.config.floatX)
.reshape(num_sequences, num_timesteps, num_dim)
.swapaxes(0, 1))
y_n = (numpy.arange(num_timesteps * num_sequences,
dtype=theano.config.floatX)
.reshape(num_sequences, num_timesteps)
.T + 2)
x = tensor.tensor3('x', dtype=theano.config.floatX)
y = tensor.matrix('y', dtype=theano.config.floatX)
W_n_sigma = numpy.random.uniform(
low=-1,
high=1,
size=(num_dim,))
W_sigma = theano.shared(W_n_sigma, borrow=True, name='W_sigma')
W_n_mu = numpy.random.uniform(
low=-1,
high=1,
size=(num_dim,))
W_mu = theano.shared(W_n_mu, borrow=True, name='W_mu')
W_n_mix = numpy.random.uniform(
low=-1,
high=1,
size=(num_dim, num_components,))
W_mix = theano.shared(W_n_mix, borrow=True, name='W_mix')
# check whether scan does what I think it does
def step(x_, y_, ll_):
v = tensor.mean((x_[:, 1:] - x_[:, :-1])**2, axis=-1)
mu = tensor.dot(x_, W_mu)
invsigma = tensor.maximum(tensor.nnet.sigmoid(
tensor.dot(x_, W_sigma)), 1e-8) / v
return (mu - y_)**2 * invsigma
lls, updates = theano.scan(step, sequences=[x, y],
outputs_info=[tensor.alloc(numpy_floatX(0.),
num_sequences)],
name='lls',
n_steps=num_timesteps)
f_lls = theano.function([x, y], lls)
f_updates = theano.function([], updates)
def sigmoid(z):
less_than_mask = z < -30
greater_than_mask = z > 30
in_range_mask = (- less_than_mask) * (- greater_than_mask)
out = numpy.empty(z.shape, dtype=float)
out[in_range_mask] = 1.0/(1+numpy.exp(-z[in_range_mask]))
out[less_than_mask] = 0.0
out[greater_than_mask] = 1.0
return out
mu_n = numpy.dot(x_n, W_n_mu)
invsigma_n = numpy.maximum(sigmoid(numpy.dot(x_n, W_n_sigma)), 1e-8)
lls_n = (mu_n - y_n)**2 * invsigma_n
| apache-2.0 | -6,745,985,951,908,591,000 | 29.4 | 71 | 0.571053 | false |
Star2Billing/did-control | docs/source/conf.py | 1 | 7294 | # -*- coding: utf-8 -*-
#
# django-admin-tools-stats documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 7 13:28:47 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
APP_DIR = os.path.normpath(os.path.join(os.getcwd(), '../..')) + '/did_control/'
sys.path.insert(0, APP_DIR)
import settings
from django.core.management import setup_environ
setup_environ(settings)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage'] # 'tastypie.ext.tastydoc'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'did-control'
copyright = u'2011, Arezqui Belaid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'did-control-doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index.html', 'did-control.tex', u'did-control Documentation',
u'Arezqui Belaid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'did-control', u'did-control Documentation',
[u'Arezqui Belaid'], 1)
]
| agpl-3.0 | 6,974,228,185,238,109,000 | 32.154545 | 84 | 0.70935 | false |
tensorflow/graphics | tensorflow_graphics/nn/layer/pointnet.py | 1 | 8675 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the PointNet networks.
@inproceedings{qi2017pointnet,
title={Pointnet: Deep learning on point sets
for3d classification and segmentation},
author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
booktitle={Proceedings of the IEEE conference on computer vision and pattern
recognition},
pages={652--660},
year={2017}}
NOTE: scheduling of batchnorm momentum currently not available in keras. However
experimentally, using the batch norm from Keras resulted in better test accuracy
(+1.5%) than the author's [custom batch norm
version](https://github.com/charlesq34/pointnet/blob/master/utils/tf_util.py)
even when coupled with batchnorm momentum decay. Further, note the author's
version is actually performing a "global normalization", as mentioned in the
[tf.nn.moments documentation]
(https://www.tensorflow.org/api_docs/python/tf/nn/moments).
This shorthand notation is used throughout this module:
`B`: Number of elements in a batch.
`N`: The number of points in the point set.
`D`: Number of dimensions (e.g. 2 for 2D, 3 for 3D).
`C`: The number of feature channels.
"""
import tensorflow as tf
from tensorflow_graphics.util import export_api
class PointNetConv2Layer(tf.keras.layers.Layer):
"""The 2D convolution layer used by the feature encoder in PointNet."""
def __init__(self, channels, momentum):
"""Constructs a Conv2 layer.
Note:
Differently from the standard Keras Conv2 layer, the order of ops is:
1. fully connected layer
2. batch normalization layer
3. ReLU activation unit
Args:
channels: the number of generated feature.
momentum: the momentum of the batch normalization layer.
"""
super(PointNetConv2Layer, self).__init__()
self.channels = channels
self.momentum = momentum
def build(self, input_shape):
"""Builds the layer with a specified input_shape."""
self.conv = tf.keras.layers.Conv2D(
self.channels, (1, 1), input_shape=input_shape)
self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Executes the convolution.
Args:
inputs: a dense tensor of size `[B, N, 1, D]`.
training: flag to control batch normalization update statistics.
Returns:
Tensor with shape `[B, N, 1, C]`.
"""
return tf.nn.relu(self.bn(self.conv(inputs), training))
class PointNetDenseLayer(tf.keras.layers.Layer):
"""The fully connected layer used by the classification head in pointnet.
Note:
Differently from the standard Keras Conv2 layer, the order of ops is:
1. fully connected layer
2. batch normalization layer
3. ReLU activation unit
"""
def __init__(self, channels, momentum):
super(PointNetDenseLayer, self).__init__()
self.momentum = momentum
self.channels = channels
def build(self, input_shape):
"""Builds the layer with a specified input_shape."""
self.dense = tf.keras.layers.Dense(self.channels, input_shape=input_shape)
self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Executes the convolution.
Args:
inputs: a dense tensor of size `[B, D]`.
training: flag to control batch normalization update statistics.
Returns:
Tensor with shape `[B, C]`.
"""
return tf.nn.relu(self.bn(self.dense(inputs), training))
class VanillaEncoder(tf.keras.layers.Layer):
"""The Vanilla PointNet feature encoder.
Consists of five conv2 layers with (64,64,64,128,1024) output channels.
Note:
PointNetConv2Layer are used instead of tf.keras.layers.Conv2D.
https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py
"""
def __init__(self, momentum=.5):
"""Constructs a VanillaEncoder keras layer.
Args:
momentum: the momentum used for the batch normalization layer.
"""
super(VanillaEncoder, self).__init__()
self.conv1 = PointNetConv2Layer(64, momentum)
self.conv2 = PointNetConv2Layer(64, momentum)
self.conv3 = PointNetConv2Layer(64, momentum)
self.conv4 = PointNetConv2Layer(128, momentum)
self.conv5 = PointNetConv2Layer(1024, momentum)
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Computes the PointNet features.
Args:
inputs: a dense tensor of size `[B,N,D]`.
training: flag to control batch normalization update statistics.
Returns:
Tensor with shape `[B, N, C=1024]`
"""
x = tf.expand_dims(inputs, axis=2) # [B,N,1,D]
x = self.conv1(x, training) # [B,N,1,64]
x = self.conv2(x, training) # [B,N,1,64]
x = self.conv3(x, training) # [B,N,1,64]
x = self.conv4(x, training) # [B,N,1,128]
x = self.conv5(x, training) # [B,N,1,1024]
x = tf.math.reduce_max(input_tensor=x, axis=1) # [B,1,1024]
return tf.squeeze(x) # [B,1024]
class ClassificationHead(tf.keras.layers.Layer):
"""The PointNet classification head.
The head consists of 2x PointNetDenseLayer layers (512 and 256 channels)
followed by a dropout layer (drop rate=30%) a dense linear layer producing the
logits of the num_classes classes.
"""
def __init__(self, num_classes=40, momentum=0.5, dropout_rate=0.3):
"""Constructor.
Args:
num_classes: the number of classes to classify.
momentum: the momentum used for the batch normalization layer.
dropout_rate: the dropout rate for fully connected layer
"""
super(ClassificationHead, self).__init__()
self.dense1 = PointNetDenseLayer(512, momentum)
self.dense2 = PointNetDenseLayer(256, momentum)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self.dense3 = tf.keras.layers.Dense(num_classes, activation="linear")
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Computes the classifiation logits given features (note: without softmax).
Args:
inputs: tensor of points with shape `[B,D]`.
training: flag for batch normalization and dropout training.
Returns:
Tensor with shape `[B,num_classes]`
"""
x = self.dense1(inputs, training) # [B,512]
x = self.dense2(x, training) # [B,256]
x = self.dropout(x, training) # [B,256]
return self.dense3(x) # [B,num_classes)
class PointNetVanillaClassifier(tf.keras.layers.Layer):
"""The PointNet 'Vanilla' classifier (i.e. without spatial transformer)."""
def __init__(self, num_classes=40, momentum=.5, dropout_rate=.3):
"""Constructor.
Args:
num_classes: the number of classes to classify.
momentum: the momentum used for the batch normalization layer.
dropout_rate: the dropout rate for the classification head.
"""
super(PointNetVanillaClassifier, self).__init__()
self.encoder = VanillaEncoder(momentum)
self.classifier = ClassificationHead(
num_classes=num_classes, momentum=momentum, dropout_rate=dropout_rate)
def call(self, points, training=None): # pylint: disable=arguments-differ
"""Computes the classifiation logits of a point set.
Args:
points: a tensor of points with shape `[B, D]`
training: for batch normalization and dropout training.
Returns:
Tensor with shape `[B,num_classes]`
"""
features = self.encoder(points, training) # (B,1024)
logits = self.classifier(features, training) # (B,num_classes)
return logits
@staticmethod
def loss(labels, logits):
"""The classification model training loss.
Note:
see tf.nn.sparse_softmax_cross_entropy_with_logits
Args:
labels: a tensor with shape `[B,]`
logits: a tensor with shape `[B,num_classes]`
"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits
residual = cross_entropy(labels, logits)
return tf.reduce_mean(input_tensor=residual)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| apache-2.0 | -210,864,469,539,070,560 | 34.264228 | 81 | 0.69487 | false |
cfelton/minnesota | mn/cores/fifo/_fifo_sync.py | 1 | 3380 |
# Copyright (c) 2014 Christopher L. Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from math import log, fmod, ceil
from myhdl import *
from _fifo_intf import check_fifo_intf
from _fifo_intf import _fifobus
from _fifo_mem import m_fifo_mem_generic
def m_fifo_sync(clock, reset, fbus):
""" Simple synchronous FIFO
PORTS
=====
PARAMETERS
==========
"""
# @todo: this is intended to be used for small fast fifo's but it
# can be used for large synchronous fifo as well
N = fbus.size
if fmod(log(N, 2), 1) != 0:
Asz = int(ceil(log(N,2)))
N = 2**Asz
print("@W: m_fifo_sync only supports power of 2 size")
print(" forcing size (depth) to %d instread of " % (N, fbus.size))
wptr = Signal(modbv(0, min=0, max=N))
rptr = Signal(modbv(0, min=0, max=N))
_vld = Signal(False)
# generic memory model
g_fifomem = m_fifo_mem_generic(clock, fbus.wr, fbus.wdata, wptr,
clock, fbus.rdata, rptr,
mem_size=fbus.size)
# @todo: almost full and almost empty flags
read = fbus.rd
write = fbus.wr
@always_seq(clock.posedge, reset=reset)
def rtl_fifo():
if fbus.clear:
wptr.next = 0
rptr.next = 0
fbus.full.next = False
fbus.empty.next = True
elif read and not write:
fbus.full.next = False
if not fbus.empty:
rptr.next = rptr + 1
if rptr == (wptr-1):
fbus.empty.next = True
elif write and not read:
fbus.empty.next = False
if not fbus.full:
wptr.next = wptr + 1
if wptr == (rptr-1):
fbus.full.next = True
elif write and read:
wptr.next = wptr + 1
rptr.next = rptr + 1
_vld.next = read
@always_comb
def rtl_assign():
fbus.rvld.next = _vld & fbus.rd
nvacant = Signal(intbv(N, min=-0, max=N+1)) # # empty slots
ntenant = Signal(intbv(0, min=-0, max=N+1)) # # filled slots
@always_seq(clock.posedge, reset=reset)
def dbg_occupancy():
if fbus.clear:
nvacant.next = N
ntenant.next = 0
else:
v = nvacant
f = ntenant
if fbus.rvld:
v = v + 1
f = f - 1
if fbus.wr:
v = v -1
f = f + 1
nvacant.next = v
ntenant.next = f
fbus.count = ntenant
return (g_fifomem, rtl_fifo, rtl_assign, dbg_occupancy,)
# attached a generic fifo bus object to the module
m_fifo_sync.fbus_intf = _fifobus | gpl-3.0 | 8,068,396,567,377,177,000 | 28.4 | 78 | 0.557692 | false |
njantrania/osf.io | scripts/refresh_box_tokens.py | 1 | 1487 | #!/usr/bin/env python
# encoding: utf-8
import sys
import logging
import datetime
from modularodm import Q
from dateutil.relativedelta import relativedelta
from scripts import utils as scripts_utils
from website.app import init_app
from website.oauth.models import ExternalAccount
from website.addons.base.exceptions import AddonError
from website.addons.box.utils import refresh_oauth_key
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_targets(delta):
return ExternalAccount.find(
Q('expires_at', 'lt', datetime.datetime.utcnow() + delta) &
Q('provider', 'eq', 'box')
)
def main(delta, dry_run):
for record in get_targets(delta):
logger.info(
'Refreshing tokens on record {0}; expires at {1}'.format(
record._id,
record.expires_at.strftime('%c')
)
)
if not dry_run:
try:
refresh_oauth_key(record, force=True)
except AddonError as ex:
logger.error(ex.message)
if __name__ == '__main__':
init_app(set_backends=True, routes=False)
dry_run = 'dry' in sys.argv
try:
days = int(sys.argv[2])
except (IndexError, ValueError, TypeError):
days = 7 # refresh tokens that expire this week
delta = relativedelta(days=days)
# Log to file
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
main(delta, dry_run=dry_run)
| apache-2.0 | 3,008,566,150,502,636,000 | 26.537037 | 69 | 0.63618 | false |
denversc/cligen | cligen/target_python.py | 1 | 1177 | # Copyright 2015 Denver Coneybeare <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Python target language support for cligen.
"""
from cligen.targets import Jinja2TargetLanguageBase
class PythonTargetLanguage(Jinja2TargetLanguageBase):
def __init__(self):
output_file = self.OutputFileInfo(
name="source file",
default_value="cligen.py",
template_name="python.py",
)
super().__init__(
key="python",
name="python",
output_files=(output_file,),
)
| gpl-3.0 | -7,476,964,423,816,049,000 | 31.694444 | 71 | 0.68819 | false |
chimkentec/KodiMODo_rep | plugin.video.torrenter/resources/scrapers/net.py | 1 | 9741 | # -*- coding: utf-8 -*-
import os
import time
import re
import urllib
import urllib2
import cookielib
import base64
import mimetools
import itertools
import xbmc
import xbmcgui
import xbmcvfs
RE = {
'content-disposition': re.compile('attachment;\sfilename="*([^"\s]+)"|\s')
}
# ################################
#
# HTTP
#
# ################################
class HTTP:
def __init__(self):
self._dirname = xbmc.translatePath('special://temp')
for subdir in ('xbmcup', 'plugin.video.torrenter'):
self._dirname = os.path.join(self._dirname, subdir)
if not xbmcvfs.exists(self._dirname):
xbmcvfs.mkdir(self._dirname)
def fetch(self, request, **kwargs):
self.con, self.fd, self.progress, self.cookies, self.request = None, None, None, None, request
if not isinstance(self.request, HTTPRequest):
self.request = HTTPRequest(url=self.request, **kwargs)
self.response = HTTPResponse(self.request)
xbmc.log('XBMCup: HTTP: request: ' + str(self.request), xbmc.LOGDEBUG)
try:
self._opener()
self._fetch()
except Exception, e:
xbmc.log('XBMCup: HTTP: ' + str(e), xbmc.LOGERROR)
if isinstance(e, urllib2.HTTPError):
self.response.code = e.code
self.response.error = e
else:
self.response.code = 200
if self.fd:
self.fd.close()
self.fd = None
if self.con:
self.con.close()
self.con = None
if self.progress:
self.progress.close()
self.progress = None
self.response.time = time.time() - self.response.time
xbmc.log('XBMCup: HTTP: response: ' + str(self.response), xbmc.LOGDEBUG)
return self.response
def _opener(self):
build = [urllib2.HTTPHandler()]
if self.request.redirect:
build.append(urllib2.HTTPRedirectHandler())
if self.request.proxy_host and self.request.proxy_port:
build.append(urllib2.ProxyHandler(
{self.request.proxy_protocol: self.request.proxy_host + ':' + str(self.request.proxy_port)}))
if self.request.proxy_username:
proxy_auth_handler = urllib2.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm', 'uri', self.request.proxy_username,
self.request.proxy_password)
build.append(proxy_auth_handler)
if self.request.cookies:
self.request.cookies = os.path.join(self._dirname, self.request.cookies)
self.cookies = cookielib.MozillaCookieJar()
if os.path.isfile(self.request.cookies):
self.cookies.load(self.request.cookies)
build.append(urllib2.HTTPCookieProcessor(self.cookies))
urllib2.install_opener(urllib2.build_opener(*build))
def _fetch(self):
params = {} if self.request.params is None else self.request.params
if self.request.upload:
boundary, upload = self._upload(self.request.upload, params)
req = urllib2.Request(self.request.url)
req.add_data(upload)
else:
if self.request.method == 'POST':
if isinstance(params, dict) or isinstance(params, list):
params = urllib.urlencode(params)
req = urllib2.Request(self.request.url, params)
else:
req = urllib2.Request(self.request.url)
for key, value in self.request.headers.iteritems():
req.add_header(key, value)
if self.request.upload:
req.add_header('Content-type', 'multipart/form-data; boundary=%s' % boundary)
req.add_header('Content-length', len(upload))
if self.request.auth_username and self.request.auth_password:
req.add_header('Authorization', 'Basic %s' % base64.encodestring(
':'.join([self.request.auth_username, self.request.auth_password])).strip())
self.con = urllib2.urlopen(req, timeout=self.request.timeout)
# self.con = urllib2.urlopen(req)
self.response.headers = self._headers(self.con.info())
if self.request.download:
self._download()
else:
self.response.body = self.con.read()
if self.request.cookies:
self.cookies.save(self.request.cookies)
def _download(self):
fd = open(self.request.download, 'wb')
if self.request.progress:
self.progress = xbmcgui.DialogProgress()
self.progress.create(u'Download')
bs = 1024 * 8
size = -1
read = 0
name = None
if self.request.progress:
if 'content-length' in self.response.headers:
size = int(self.response.headers['content-length'])
if 'content-disposition' in self.response.headers:
r = RE['content-disposition'].search(self.response.headers['content-disposition'])
if r:
name = urllib.unquote(r.group(1))
while 1:
buf = self.con.read(bs)
if buf == '':
break
read += len(buf)
fd.write(buf)
if self.request.progress:
self.progress.update(*self._progress(read, size, name))
self.response.filename = self.request.download
def _upload(self, upload, params):
res = []
boundary = mimetools.choose_boundary()
part_boundary = '--' + boundary
if params:
for name, value in params.iteritems():
res.append([part_boundary, 'Content-Disposition: form-data; name="%s"' % name, '', value])
if isinstance(upload, dict):
upload = [upload]
for obj in upload:
name = obj.get('name')
filename = obj.get('filename', 'default')
content_type = obj.get('content-type')
try:
body = obj['body'].read()
except AttributeError:
body = obj['body']
if content_type:
res.append([part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % (name, urllib.quote(filename)),
'Content-Type: %s' % content_type, '', body])
else:
res.append([part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % (name, urllib.quote(filename)), '',
body])
result = list(itertools.chain(*res))
result.append('--' + boundary + '--')
result.append('')
return boundary, '\r\n'.join(result)
def _headers(self, raw):
headers = {}
for line in raw.headers:
pair = line.split(':', 1)
if len(pair) == 2:
tag = pair[0].lower().strip()
value = pair[1].strip()
if tag and value:
headers[tag] = value
return headers
def _progress(self, read, size, name):
res = []
if size < 0:
res.append(1)
else:
res.append(int(float(read) / (float(size) / 100.0)))
if name:
res.append(u'File: ' + name)
if size != -1:
res.append(u'Size: ' + self._human(size))
res.append(u'Load: ' + self._human(read))
return res
def _human(self, size):
human = None
for h, f in (('KB', 1024), ('MB', 1024 * 1024), ('GB', 1024 * 1024 * 1024), ('TB', 1024 * 1024 * 1024 * 1024)):
if size / f > 0:
human = h
factor = f
else:
break
if human is None:
return (u'%10.1f %s' % (size, u'byte')).replace(u'.0', u'')
else:
return u'%10.2f %s' % (float(size) / float(factor), human)
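# Illustrative sketch (not part of the original module): typical use of the
# HTTP wrapper above. Keyword arguments passed to fetch() are forwarded to
# HTTPRequest (defined below), so a plain URL string is enough for simple
# requests. The URL and download path are placeholders.
def _example_fetch():
    http = HTTP()
    # simple GET; the body ends up in response.body
    response = http.fetch('http://example.com/index.html', timeout=10.0)
    page = response.body if response.code == 200 else None
    # download to a file, showing the progress dialog while it runs
    download = http.fetch(HTTPRequest(url='http://example.com/file.zip',
                                      download='/tmp/file.zip',
                                      progress=True))
    return page, download.filename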
class HTTPRequest:
def __init__(self, url, method='GET', headers=None, cookies=None, params=None, upload=None, download=None,
progress=False, auth_username=None, auth_password=None, proxy_protocol='http', proxy_host=None,
proxy_port=None, proxy_username=None, proxy_password='', timeout=20.0, redirect=True, gzip=False):
if headers is None:
headers = {}
self.url = url
self.method = method
self.headers = headers
self.cookies = cookies
self.params = params
self.upload = upload
self.download = download
self.progress = progress
self.auth_username = auth_username
self.auth_password = auth_password
self.proxy_protocol = proxy_protocol
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.timeout = timeout
self.redirect = redirect
self.gzip = gzip
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ','.join('%s=%r' % i for i in self.__dict__.iteritems()))
class HTTPResponse:
def __init__(self, request):
self.request = request
self.code = None
self.headers = {}
self.error = None
self.body = None
self.filename = None
self.time = time.time()
def __repr__(self):
args = ','.join('%s=%r' % i for i in self.__dict__.iteritems() if i[0] != 'body')
if self.body:
args += ',body=<data>'
else:
args += ',body=None'
return '%s(%s)' % (self.__class__.__name__, args)
| gpl-3.0 | 6,977,368,259,031,119,000 | 31.79798 | 119 | 0.539267 | false |
inveniosoftware-contrib/json-merger | json_merger/conflict.py | 1 | 4958 | # -*- coding: utf-8 -*-
#
# This file is part of Inspirehep.
# Copyright (C) 2016 CERN.
#
# Inspirehep is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Inspirehep is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Inspirehep; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import json
from pyrsistent import freeze, thaw
from .utils import force_list
class ConflictType(object):
"""Types of Conflict.
Attributes:
REORDER: The list specified by the path might need to be reordered.
MANUAL_MERGE: The triple specified as the conflict body needs to be
manually merged and added to the conflict path.
ADD_BACK_TO_HEAD: The object specified as the conflict body might
need to be added back to the list specified in the conflict's path.
SET_FIELD: The object specified as the conflict body needs to be
added at the path specified in the conflict object.
REMOVE_FIELD: The value or object present at the path specified in
the path conflict needs to be removed.
INSERT: The object specified as the conflict body needs to be
inserted at the path specified in the conflict object.
"""
pass
_CONFLICTS = (
'REORDER',
'MANUAL_MERGE',
'ADD_BACK_TO_HEAD',
'SET_FIELD',
'REMOVE_FIELD',
'INSERT',
)
for conflict_type in _CONFLICTS:
setattr(ConflictType, conflict_type, conflict_type)
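# Illustrative sketch (not part of the original module): how a Conflict
# instance (the class defined just below) maps onto a json-patch operation.
# The path and value used here are made up for demonstration only.
def _example_conflict_to_json():
    conflict = Conflict(ConflictType.SET_FIELD, ('title',), 'Updated title')
    # yields (key order may vary):
    # [{"path": "/title", "op": "replace", "value": "Updated title",
    #   "$type": "SET_FIELD"}]
    return conflict.to_json()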
class Conflict(tuple):
"""Immutable and Hashable representation of a conflict.
Attributes:
conflict_type: A :class:`json_merger.conflict.ConflictType` member.
path: A tuple containing the path to the conflictual field.
body: Optional value representing the body of the conflict.
Note:
Even if the conflict body can be any arbitrary object, this is saved
internally as an immutable object so that a Conflict instance can be
safely used in sets or as a dict key.
"""
# Based on http://stackoverflow.com/a/4828108
# Compatible with Python<=2.6
def __new__(cls, conflict_type, path, body):
if conflict_type not in _CONFLICTS:
raise ValueError('Bad Conflict Type %s' % conflict_type)
body = freeze(body)
return tuple.__new__(cls, (conflict_type, path, body))
conflict_type = property(lambda self: self[0])
path = property(lambda self: self[1])
body = property(lambda self: thaw(self[2]))
def with_prefix(self, root_path):
"""Returns a new conflict with a prepended prefix as a path."""
return Conflict(self.conflict_type, root_path + self.path, self.body)
    def to_json(self):
        """Serializes the conflict to a JSON string.
        It returns a list of operations in
        `json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
"""
# map ConflictType to json-patch operator
path = self.path
if self.conflict_type in ('REORDER', 'SET_FIELD'):
op = 'replace'
elif self.conflict_type in ('MANUAL_MERGE', 'ADD_BACK_TO_HEAD'):
op = 'add'
path += ('-',)
elif self.conflict_type == 'REMOVE_FIELD':
op = 'remove'
elif self.conflict_type == "INSERT":
op = "add"
else:
raise ValueError(
                'Conflict Type %s cannot be mapped to a json-patch operation'
                % self.conflict_type
)
# stringify path array
json_pointer = '/' + '/'.join(str(el) for el in path)
conflict_values = force_list(self.body)
conflicts = []
for value in conflict_values:
if value is not None or self.conflict_type == 'REMOVE_FIELD':
conflicts.append({
'path': json_pointer,
'op': op,
'value': value,
'$type': self.conflict_type
})
return json.dumps(conflicts)
| gpl-2.0 | -1,570,750,061,068,503,300 | 33.430556 | 79 | 0.635942 | false |
theworldbright/mainsite | aspc/courses/models.py | 1 | 9654 | from django.db import models
from django.conf import settings
from datetime import date, datetime, timedelta
import json
from django.core.serializers.json import DjangoJSONEncoder
CAMPUSES = (
(1, u'PO'), (2, u'SC'), (3, u'CMC'), (4, u'HM'), (5, u'PZ'), (6, u'CGU'), (7, u'CU'), (8, u'KS'), (-1, u'?'))
CAMPUSES_FULL_NAMES = {1: 'Pomona', 2: 'Scripps', 3: 'Claremont-McKenna', 4: 'Harvey Mudd', 5: 'Pitzer'}
CAMPUSES_LOOKUP = dict([(a[1], a[0]) for a in CAMPUSES])
# Some campuses are represented more than one way so we make aliases
CAMPUSES_LOOKUP['CM'] = CAMPUSES_LOOKUP['CMC']
CAMPUSES_LOOKUP['CUC'] = CAMPUSES_LOOKUP['CU']
CAMPUSES_LOOKUP['CG'] = CAMPUSES_LOOKUP['CGU']
SESSIONS = ((u'SP', u'Spring'), (u'FA', u'Fall'))
SUBSESSIONS = ((u'P1', u'1'), (u'P2', u'2'))
# TODO: Make this robust for different semesters
# (see the academic calendar at http://catalog.pomona.edu/content.php?catoid=14&navoid=2582)
START_DATE = date(2015, 9, 1)
END_DATE = date(2015, 12, 18)
class Term(models.Model):
key = models.CharField(max_length=20, unique=True)
year = models.PositiveSmallIntegerField()
session = models.CharField(max_length=2, choices=SESSIONS)
def __unicode__(self):
return u'%s %s' % (self.session, self.year)
class Meta:
ordering = ['-year', 'session']
class Instructor(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Department(models.Model):
code = models.CharField(max_length=20, unique=True, db_index=True)
name = models.CharField(max_length=100)
def course_count(self):
return len(self.primary_course_set.all())
def non_breaking_name(self):
return self.name.replace(' ', ' ')
def __unicode__(self):
return u'[%s] %s' % (self.code, self.name)
@models.permalink
def get_absolute_url(self):
return ('department_detail', (), {'slug': self.code, })
class RequirementArea(models.Model):
code = models.CharField(max_length=20, unique=True, db_index=True)
name = models.CharField(max_length=100)
campus = models.SmallIntegerField(choices=CAMPUSES)
def course_count(self):
return len(self.course_set.all())
def non_breaking_name(self):
return self.name.replace(' ', ' ')
def __unicode__(self):
return u'[%s] %s' % (self.code, self.name)
@models.permalink
def get_absolute_url(self):
return ('requirement_area_detail', (), {'slug': self.code, })
class Course(models.Model):
code = models.CharField(max_length=20, unique=True, db_index=True)
code_slug = models.CharField(max_length=20, unique=True, db_index=True)
number = models.IntegerField(default=0)
name = models.CharField(max_length=256)
primary_department = models.ForeignKey(Department, related_name='primary_course_set', null=True)
departments = models.ManyToManyField(Department, related_name='course_set')
requirement_areas = models.ManyToManyField(RequirementArea, related_name='course_set')
def __unicode__(self):
return u'[%s] %s' % (self.code, self.name)
class Meta:
ordering = ('code',)
class Section(models.Model):
term = models.ForeignKey(Term, related_name='sections')
course = models.ForeignKey(Course, related_name='sections')
code = models.CharField(max_length=20)
code_slug = models.CharField(max_length=20)
instructors = models.ManyToManyField(Instructor, related_name='sections')
grading_style = models.CharField(max_length=100, blank=True, null=True)
description = models.TextField(blank=True, null=True)
note = models.TextField(blank=True, null=True)
credit = models.FloatField()
requisites = models.BooleanField(default=False)
fee = models.BooleanField(default=False)
perms = models.IntegerField(null=True)
spots = models.IntegerField(null=True)
filled = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.code
# return u'[%s] %s (%s)' % (
# self.code, self.course.name, ', '.join(self.instructors.all().values_list('name', flatten=True)))
def get_campuses(self):
campuses = []
for mtg in self.meeting_set.all():
campuses.append(mtg.get_campus())
return campuses
def get_campus(self):
campii = self.get_campuses()
if len(campii) > 0:
return self.get_campuses()[0]
else:
return 'UU'
def json(self):
event_list = []
for mtg in self.meeting_set.all():
for begin, end in mtg.to_datetime_ranges():
event_list.append({
'id': '%s-%s-%s' % (self.code, mtg.id, begin.strftime('%w')),
'start': begin,
'end': end,
'title': self.code,
})
return {'events': event_list, 'info': {'course_code': self.code, 'course_code_slug': self.code_slug,
'detail_url': self.get_absolute_url(),
'campus_code': self.get_campus(), }}
@models.permalink
def get_absolute_url(self):
if not self.course.primary_department: print self.course
return ('course_detail', (), {'course_code': self.code_slug, 'dept': self.course.primary_department.code, })
class Meta:
ordering = ('code',)
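# Illustrative sketch (not part of the original module): the shape of the
# dictionary produced by Section.json() above for the schedule frontend.
# All codes, times, ids and the detail URL are made-up placeholder values.
_EXAMPLE_SECTION_JSON = {
    'events': [{
        'id': 'ECON051 PO-01-17-1',
        'start': datetime(2012, 9, 3, 9, 0),
        'end': datetime(2012, 9, 3, 9, 50),
        'title': 'ECON051 PO-01',
    }],
    'info': {
        'course_code': 'ECON051 PO-01',
        'course_code_slug': 'ECON051-PO-01',
        'detail_url': '/courses/ECON/ECON051-PO-01/',
        'campus_code': 'PO',
    },
}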
class Meeting(models.Model):
section = models.ForeignKey(Section)
monday = models.BooleanField(default=False)
tuesday = models.BooleanField(default=False)
wednesday = models.BooleanField(default=False)
thursday = models.BooleanField(default=False)
friday = models.BooleanField(default=False)
begin = models.TimeField()
end = models.TimeField()
campus = models.SmallIntegerField(choices=CAMPUSES)
location = models.CharField(max_length=100)
def gen_days(self):
s = []
if self.monday: s.append('M')
if self.tuesday: s.append('T')
if self.wednesday: s.append('W')
if self.thursday: s.append('R')
if self.friday: s.append('F')
return s
def to_datetime_ranges(self, base_date=None):
ranges = []
combine_dates = []
# Historical note: the frontend calendar supports navigating week
# by week, but we've turned it into a stripped down week calendar.
#
# Under the hood, it still wants a timestamp for events, though it
# doesn't matter what as long as the day of the week works correctly.
frontend_calendar_start = date(2012, 9, 3)
# Note: the version of JQuery-WeekCalendar we have gets off by two on
# computing day-of-week starting in 2013. Rather than fix this, since
# we don't use the rest of its features, we froze it in the past.
if not base_date:
base_date = frontend_calendar_start
if self.monday:
combine_dates.append(base_date + timedelta(
days=(7 + 0 - base_date.weekday()) % 7 # get correct weekday
# offset depending on
# start date weekday
))
if self.tuesday:
combine_dates.append(base_date + timedelta(
days=(7 + 1 - base_date.weekday()) % 7
))
if self.wednesday:
combine_dates.append(base_date + timedelta(
days=(7 + 2 - base_date.weekday()) % 7
))
if self.thursday:
combine_dates.append(base_date + timedelta(
days=(7 + 3 - base_date.weekday()) % 7
))
if self.friday:
            combine_dates.append(base_date + timedelta(
days=(7 + 4 - base_date.weekday()) % 7
))
for basedate in combine_dates:
begin = datetime.combine(basedate, self.begin)
end = datetime.combine(basedate, self.end)
if end > begin: # Sanity check for malformed meetings in CX
ranges.append((begin, end))
return ranges
def get_campus(self):
return CAMPUSES[self.campus - 1][1] # CAMPUSES is now 1-based
def __unicode__(self):
return u'[%s] Meeting every %s, %s-%s' % (
self.section.code, ''.join(self.gen_days()), self.begin.strftime('%I:%M %p'), self.end.strftime('%I:%M %p'))
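# Illustrative sketch (not part of the original module): the weekday-offset
# arithmetic used by Meeting.to_datetime_ranges() above. The frozen frontend
# base date (2012-09-03) is a Monday, i.e. weekday() == 0, so the offsets for
# Monday..Friday resolve to 0..4 and each meeting lands on the right day.
def _example_weekday_offsets(base_date=date(2012, 9, 3)):
    # target 0..4 corresponds to Monday..Friday, as in the if-branches above
    return [(7 + target - base_date.weekday()) % 7 for target in range(5)]
    # -> [0, 1, 2, 3, 4] for the Monday base date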
class Schedule(models.Model):
sections = models.ManyToManyField(Section)
create_ts = models.DateTimeField(default=datetime.now)
def json(self):
all_sections = []
for section in self.sections.all():
all_sections.append(section.json())
return all_sections
def json_encoded(self):
return json.dumps(self.json(), cls=DjangoJSONEncoder)
@models.permalink
def get_absolute_url(self):
return ('aspc.courses.views.view_schedule', (self.id,))
def outside_url(self):
return u''.join([settings.OUTSIDE_URL_BASE, self.get_absolute_url()])
def __unicode__(self):
return u'Schedule %i' % (self.id,)
class RefreshHistory(models.Model):
FULL = 0
REGISTRATION = 1
run_date = models.DateTimeField(default=datetime.now)
last_refresh_date = models.DateTimeField()
term = models.ForeignKey(Term, related_name='term')
type = models.IntegerField(choices=(
(FULL, 'Full'),
(REGISTRATION, 'Registration'),
))
def __unicode__(self):
return u"{0} refresh at {1}".format(self.get_type_display(), self.last_refresh_date.isoformat())
class Meta:
verbose_name_plural = 'refresh histories'
| mit | -5,253,931,363,524,595,000 | 33.355872 | 120 | 0.610317 | false |
3324fr/spinalcordtoolbox | dev/control_points/make_centerline.py | 1 | 3765 | # Main function: returns the centerline of the nifti image fname as a nifti binary file
# Centerline is generated using sct_nurbs with nbControl = size/div
from sct_nurbs_v2 import *
import nibabel
import splines_approximation_v2 as spline_app
from scipy import ndimage
import numpy
import commands
import linear_fitting as lf
import sct_utils
def returnCenterline(fname = None, nurbs = 0, div = 0):
if fname == None:
fname = 't250_half_sup_straight_seg.nii.gz'
file = nibabel.load(fname)
data = file.get_data()
hdr_seg = file.get_header()
nx, ny, nz = spline_app.getDim(fname)
x = [0 for iz in range(0, nz, 1)]
y = [0 for iz in range(0, nz, 1)]
z = [iz for iz in range(0, nz, 1)]
for iz in range(0, nz, 1):
x[iz], y[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))
points = [[x[n],y[n],z[n]] for n in range(len(x))]
p1, p2, p3 = spline_app.getPxDimensions(fname)
size = spline_app.getSize(x, y, z, p1, p2, p3)
data = data*0
if nurbs:
if check_nurbs(div, size, points) != 0:
x_centerline_fit=P[0]
y_centerline_fit=P[1]
z_centerline_fit=P[2]
for i in range(len(z_centerline_fit)) :
data[int(round(x_centerline_fit[i])),int(round(y_centerline_fit[i])),int(z_centerline_fit[i])] = 1
else: return 1
else:
for i in range(len(z)) :
data[int(round(x[i])),int(round(y[i])),int(z[i])] = 1
path, file_name, ext_fname = sct_utils.extract_fname(fname)
img = nibabel.Nifti1Image(data, None, hdr_seg)
#return img
saveFile(file_name, img, div)
return size
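# Illustrative sketch (not part of the original script): a typical call of
# returnCenterline() above on an existing binary segmentation, with NURBS
# smoothing enabled. The divider value is an arbitrary placeholder; together
# with the measured curve length it sets the number of control points
# (nbControl = size/div).
def _example_centerline_call():
    return returnCenterline('t250_half_sup_straight_seg.nii.gz', nurbs=1, div=25)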
def check_nurbs(div, size = 0, points = 0, centerline = ''):
if centerline == '':
print 'div = ',div,' size = ', round(size)
nurbs = NURBS(int(round(size)), int(div), 3, 3000, points)
P = nurbs.getCourbe3D()
if P==1:
print "ERROR: instability in NURBS computation, div will be incremented. "
return 1
else:
file = nibabel.load(centerline)
data = file.get_data()
hdr_seg = file.get_header()
nx, ny, nz = spline_app.getDim(centerline)
x = [0 for iz in range(0, nz, 1)]
y = [0 for iz in range(0, nz, 1)]
z = [iz for iz in range(0, nz, 1)]
for iz in range(0, nz, 1):
x[iz], y[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))
points = [[x[n],y[n],z[n]] for n in range(len(x))]
p1, p2, p3 = spline_app.getPxDimensions(centerline)
size = spline_app.getSize(x, y, z, p1, p2, p3)
print 'div = ',div,' size = ', round(size)
#nurbs = NURBS(int(round(size)), int(div), 3, 3000, points) --> this work with sct_nurbs_v1
try:
nurbs = NURBS(3, 3000, points, False, None, int(round(size)), int(div))
P = nurbs.getCourbe3D()
except UnboundLocalError:
print "ERROR: instability in NURBS computation, UnboundLocalError caught, div will be incremented. "
return 1
except ZeroDivisionError:
print "ERROR: instability in NURBS computation, ZeroDivisionError caught, div will be incremented. "
return 1
if P==1:
print "ERROR: instability in NURBS computation, div will be incremented. "
return 1
else: return round(size)
def saveFile(file_name, img, div):
path_centerline = './centerlines/'+file_name+'_'+str(div)+'_centerline.nii.gz'
nibabel.save(img,path_centerline)
#cmd = 'sct_straighten_spinalcord -i '+path_centerline+' -c '+fname
#print cmd
#commands.getstatusoutput(cmd)
#cmd = 'sct_propseg'
if __name__ == "__main__":
returnCenterline() | mit | 7,278,184,712,280,968,000 | 31.188034 | 114 | 0.592297 | false |
ceibal-tatu/sugar-toolkit-gtk3 | src/sugar3/graphics/window.py | 1 | 10947 | # Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2009, Aleksey Lim, Sayamindu Dasgupta
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
STABLE.
"""
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import GdkX11
from gi.repository import Gtk
import warnings
from sugar3.graphics.icon import Icon
from sugar3.graphics import palettegroup
_UNFULLSCREEN_BUTTON_VISIBILITY_TIMEOUT = 2
class UnfullscreenButton(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.set_decorated(False)
self.set_resizable(False)
self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.set_border_width(0)
self.props.accept_focus = False
#Setup estimate of width, height
valid_, w, h = Gtk.icon_size_lookup(Gtk.IconSize.LARGE_TOOLBAR)
self._width = w
self._height = h
screen = self.get_screen()
screen.connect('size-changed', self._screen_size_changed_cb)
self._button = Gtk.Button()
self._button.set_relief(Gtk.ReliefStyle.NONE)
self._icon = Icon(icon_name='view-return',
icon_size=Gtk.IconSize.LARGE_TOOLBAR)
self._icon.show()
self._button.add(self._icon)
self._button.show()
self.add(self._button)
def connect_button_clicked(self, cb):
self._button.connect('clicked', cb)
def _reposition(self):
x = Gdk.Screen.width() - self._width
self.move(x, 0)
def do_get_preferred_width(self):
minimum, natural = Gtk.Window.do_get_preferred_width(self)
self._width = minimum
self._reposition()
return minimum, natural
def _screen_size_changed_cb(self, screen):
self._reposition()
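# Illustrative sketch (not part of the original module): the typical wiring of
# the Window class defined below, as a sugar activity would do it. The toolbar
# and canvas widgets are placeholders supplied by the caller.
def _example_window_setup(toolbar_box, canvas_widget):
    window = Window()
    window.set_toolbar_box(toolbar_box)
    window.set_canvas(canvas_widget)
    window.show()
    return window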
class Window(Gtk.Window):
def __init__(self, **args):
self._enable_fullscreen_mode = True
GObject.GObject.__init__(self, **args)
self.set_decorated(False)
self.connect('realize', self.__window_realize_cb)
self.connect('key-press-event', self.__key_press_cb)
# OSK support: canvas auto panning based on input focus
if GObject.signal_lookup('request-clear-area', Window) != 0 and \
GObject.signal_lookup('unset-clear-area', Window) != 0:
self.connect('size-allocate', self.__size_allocate_cb)
self.connect('request-clear-area', self.__request_clear_area_cb)
self.connect('unset-clear-area', self.__unset_clear_area_cb)
self._clear_area_dy = 0
self._toolbar_box = None
self._alerts = []
self._canvas = None
self.tray = None
self.__vbox = Gtk.VBox()
self.__hbox = Gtk.HBox()
self.__vbox.pack_start(self.__hbox, True, True, 0)
self.__hbox.show()
self.add_events(Gdk.EventMask.POINTER_MOTION_HINT_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.TOUCH_MASK)
self.connect('motion-notify-event', self.__motion_notify_cb)
self.connect('button-release-event', self.__button_press_event_cb)
self.add(self.__vbox)
self.__vbox.show()
self._is_fullscreen = False
self._unfullscreen_button = UnfullscreenButton()
self._unfullscreen_button.set_transient_for(self)
self._unfullscreen_button.connect_button_clicked(
self.__unfullscreen_button_clicked)
self._unfullscreen_button_timeout_id = None
def reveal(self):
""" Make window active
In contrast with present(), brings window to the top
        even when invoked in response to non-gtk events.
See #1423.
"""
window = self.get_window()
if window is None:
self.show()
return
timestamp = Gtk.get_current_event_time()
if not timestamp:
timestamp = GdkX11.x11_get_server_time(window)
window.focus(timestamp)
def fullscreen(self):
palettegroup.popdown_all()
if self._toolbar_box is not None:
self._toolbar_box.hide()
if self.tray is not None:
self.tray.hide()
self._is_fullscreen = True
if self.props.enable_fullscreen_mode:
self._unfullscreen_button.show()
if self._unfullscreen_button_timeout_id is not None:
GObject.source_remove(self._unfullscreen_button_timeout_id)
self._unfullscreen_button_timeout_id = None
self._unfullscreen_button_timeout_id = \
GObject.timeout_add_seconds( \
_UNFULLSCREEN_BUTTON_VISIBILITY_TIMEOUT, \
self.__unfullscreen_button_timeout_cb)
def unfullscreen(self):
if self._toolbar_box is not None:
self._toolbar_box.show()
if self.tray is not None:
self.tray.show()
self._is_fullscreen = False
if self.props.enable_fullscreen_mode:
self._unfullscreen_button.hide()
if self._unfullscreen_button_timeout_id:
GObject.source_remove(self._unfullscreen_button_timeout_id)
self._unfullscreen_button_timeout_id = None
def set_canvas(self, canvas):
if self._canvas:
self.__hbox.remove(self._canvas)
if canvas:
self.__hbox.pack_start(canvas, True, True, 0)
self._canvas = canvas
self.__vbox.set_focus_child(self._canvas)
def get_canvas(self):
return self._canvas
canvas = property(get_canvas, set_canvas)
def get_toolbar_box(self):
return self._toolbar_box
def set_toolbar_box(self, toolbar_box):
if self._toolbar_box:
self.__vbox.remove(self._toolbar_box)
if toolbar_box:
self.__vbox.pack_start(toolbar_box, False, False, 0)
self.__vbox.reorder_child(toolbar_box, 0)
self._toolbar_box = toolbar_box
toolbar_box = property(get_toolbar_box, set_toolbar_box)
def set_tray(self, tray, position):
if self.tray:
box = self.tray.get_parent()
box.remove(self.tray)
if position == Gtk.PositionType.LEFT:
self.__hbox.pack_start(tray, False, False, 0)
elif position == Gtk.PositionType.RIGHT:
self.__hbox.pack_end(tray, False, False, 0)
elif position == Gtk.PositionType.BOTTOM:
self.__vbox.pack_end(tray, False, False, 0)
self.tray = tray
def add_alert(self, alert):
self._alerts.append(alert)
if len(self._alerts) == 1:
self.__vbox.pack_start(alert, False, False, 0)
if self._toolbar_box is not None:
self.__vbox.reorder_child(alert, 1)
else:
self.__vbox.reorder_child(alert, 0)
def remove_alert(self, alert):
if alert in self._alerts:
self._alerts.remove(alert)
# if the alert is the visible one on top of the queue
if alert.get_parent() is not None:
self.__vbox.remove(alert)
if len(self._alerts) >= 1:
self.__vbox.pack_start(self._alerts[0], False, False, 0)
if self._toolbar_box is not None:
self.__vbox.reorder_child(self._alerts[0], 1)
else:
                    self.__vbox.reorder_child(self._alerts[0], 0)
def __window_realize_cb(self, window):
group = Gtk.Window()
group.realize()
window.get_window().set_group(group.get_window())
def __key_press_cb(self, widget, event):
key = Gdk.keyval_name(event.keyval)
if event.get_state() & Gdk.ModifierType.MOD1_MASK:
if self.tray is not None and key == 'space':
self.tray.props.visible = not self.tray.props.visible
return True
elif key == 'Escape' and self._is_fullscreen and \
self.props.enable_fullscreen_mode:
self.unfullscreen()
return True
return False
def __unfullscreen_button_clicked(self, button):
self.unfullscreen()
def __button_press_event_cb(self, widget, event):
self._show_unfullscreen_button()
return False
def __motion_notify_cb(self, widget, event):
self._show_unfullscreen_button()
return False
def _show_unfullscreen_button(self):
if self._is_fullscreen and self.props.enable_fullscreen_mode:
if not self._unfullscreen_button.props.visible:
self._unfullscreen_button.show()
# Reset the timer
if self._unfullscreen_button_timeout_id is not None:
GObject.source_remove(self._unfullscreen_button_timeout_id)
self._unfullscreen_button_timeout_id = None
self._unfullscreen_button_timeout_id = \
GObject.timeout_add_seconds( \
_UNFULLSCREEN_BUTTON_VISIBILITY_TIMEOUT, \
self.__unfullscreen_button_timeout_cb)
def __unfullscreen_button_timeout_cb(self):
self._unfullscreen_button.hide()
self._unfullscreen_button_timeout_id = None
return False
def __request_clear_area_cb(self, activity, osk_rect, cursor_rect):
self._clear_area_dy = cursor_rect.y + cursor_rect.height - osk_rect.y
if self._clear_area_dy < 0:
self._clear_area_dy = 0
return False
self.queue_resize()
return True
def __unset_clear_area_cb(self, activity, snap_back):
self._clear_area_dy = 0
self.queue_resize()
return True
def __size_allocate_cb(self, widget, allocation):
self.set_allocation(allocation)
allocation.y -= self._clear_area_dy
self.__vbox.size_allocate(allocation)
def set_enable_fullscreen_mode(self, enable_fullscreen_mode):
self._enable_fullscreen_mode = enable_fullscreen_mode
def get_enable_fullscreen_mode(self):
return self._enable_fullscreen_mode
enable_fullscreen_mode = GObject.property(type=object,
setter=set_enable_fullscreen_mode, getter=get_enable_fullscreen_mode)
| lgpl-2.1 | -8,212,694,695,301,188,000 | 32.787037 | 77 | 0.603636 | false |
YuepengGuo/backtrader | docs/observers-and-statistics/observers-default.py | 1 | 1323 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import backtrader as bt
import backtrader.feeds as btfeeds
if __name__ == '__main__':
cerebro = bt.Cerebro(stdstats=False)
cerebro.addstrategy(bt.Strategy)
data = bt.feeds.BacktraderCSVData(dataname='../../datas/2006-day-001.txt')
cerebro.adddata(data)
cerebro.run()
cerebro.plot()
| gpl-3.0 | -4,085,089,415,937,773,000 | 36.8 | 79 | 0.628118 | false |
mic4ael/indico | indico/util/suggestions.py | 1 | 5843 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import division, print_function, unicode_literals
from collections import defaultdict
from datetime import date, timedelta
from sqlalchemy.orm import joinedload, load_only
from indico.modules.events import Event
from indico.modules.events.abstracts.util import get_events_with_abstract_persons
from indico.modules.events.contributions.util import get_events_with_linked_contributions
from indico.modules.events.registration.util import get_events_registered
from indico.modules.events.surveys.util import get_events_with_submitted_surveys
from indico.util.date_time import now_utc, utc_to_server
from indico.util.struct.iterables import window
def _get_blocks(events, attended):
blocks = []
block = []
for event in events:
if event not in attended:
if block:
blocks.append(block)
block = []
continue
block.append(event)
if block:
blocks.append(block)
return blocks
def _query_categ_events(categ, start_dt, end_dt):
return (Event.query
.with_parent(categ)
.filter(Event.happens_between(start_dt, end_dt))
.options(load_only('id', 'start_dt', 'end_dt')))
def _get_category_score(user, categ, attended_events, debug=False):
if debug:
print(repr(categ))
# We care about events in the whole timespan where the user attended some events.
# However, this might result in some missed events e.g. if the user was not working for
# a year and then returned. So we throw away old blocks (or rather adjust the start time
# to the start time of the newest block)
first_event_date = attended_events[0].start_dt.replace(hour=0, minute=0)
last_event_date = attended_events[-1].start_dt.replace(hour=0, minute=0) + timedelta(days=1)
blocks = _get_blocks(_query_categ_events(categ, first_event_date, last_event_date), attended_events)
for a, b in window(blocks):
# More than 3 months between blocks? Ignore the old block!
if b[0].start_dt - a[-1].start_dt > timedelta(weeks=12):
first_event_date = b[0].start_dt.replace(hour=0, minute=0)
# Favorite categories get a higher base score
score = int(categ in user.favorite_categories)
if debug:
print('{0:+.3f} - initial'.format(score))
    # Attendance percentage goes to the score directly. If the attendance is high, chances are good that the user
    # is either very interested in whatever goes on in the category or it's something he has to attend regularly.
total = _query_categ_events(categ, first_event_date, last_event_date).count()
if total:
attended_block_event_count = sum(1 for e in attended_events if e.start_dt >= first_event_date)
score += attended_block_event_count / total
if debug:
print('{0:+.3f} - attendance'.format(score))
# If there are lots/few unattended events after the last attended one we also update the score with that
total_after = _query_categ_events(categ, last_event_date + timedelta(days=1), None).count()
if total_after < total * 0.05:
score += 0.25
elif total_after > total * 0.25:
score -= 0.5
if debug:
print('{0:+.3f} - unattended new events'.format(score))
# Lower the score based on how long ago the last attended event was if there are no future events
# We start applying this modifier only if the event has been more than 40 days in the past to avoid
# it from happening in case of monthly events that are not created early enough.
days_since_last_event = (date.today() - last_event_date.date()).days
if days_since_last_event > 40:
score -= 0.025 * days_since_last_event
if debug:
print('{0:+.3f} - days since last event'.format(score))
# For events in the future however we raise the score
now_local = utc_to_server(now_utc())
attending_future = (_query_categ_events(categ, now_local, last_event_date)
.filter(Event.id.in_(e.id for e in attended_events))
.all())
if attending_future:
score += 0.25 * len(attending_future)
if debug:
print('{0:+.3f} - future event count'.format(score))
days_to_future_event = (attending_future[0].start_dt.date() - date.today()).days
score += max(0.1, -(max(0, days_to_future_event - 2) / 4) ** (1 / 3) + 2.5)
if debug:
print('{0:+.3f} - days to next future event'.format(score))
return score
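# Illustrative sketch (not part of the original module): sample values of the
# future-event boost added at the end of _get_category_score() above. The term
# decays with the cube root of the days until the next attended event and is
# clamped to a floor of 0.1.
def _example_future_event_boost(days_to_future_event):
    return max(0.1, -(max(0, days_to_future_event - 2) / 4) ** (1 / 3) + 2.5)
# e.g. 0-2 days -> 2.5, 6 days -> 1.5, 30 days -> ~0.59, 100+ days -> 0.1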
def get_category_scores(user, debug=False):
# XXX: check if we can add some more roles such as 'contributor' to assume attendance
event_ids = set()
event_ids.update(id_
for id_, roles in get_events_with_abstract_persons(user).iteritems()
if 'abstract_submitter' in roles)
event_ids.update(id_
for id_, roles in get_events_with_linked_contributions(user).iteritems()
if 'contribution_submission' in roles)
event_ids |= get_events_registered(user)
event_ids |= get_events_with_submitted_surveys(user)
if not event_ids:
return {}
attended = (Event.query
.filter(Event.id.in_(event_ids), ~Event.is_deleted)
.options(joinedload('category'))
.order_by(Event.start_dt, Event.id)
.all())
categ_events = defaultdict(list)
for event in attended:
categ_events[event.category].append(event)
return dict((categ, _get_category_score(user, categ, events, debug))
for categ, events in categ_events.iteritems())
| mit | 1,942,233,109,239,338,800 | 45.007874 | 114 | 0.655827 | false |
coreequip/xbmc-addon-service-watchedlist | service.py | 1 | 62979 | """
This file contains the class of the addon
Settings for this addon:
w_movies
'true', 'false': save watched state of movies
w_episodes
'true', 'false': save watched state of movies
autostart
delay
delay after startup in minutes: '0', '5', '10', ...
starttype
'0' = No autostart
'1' = One Execution after xbmc start
'2' = Periodic start of the addon
    interval
        time between periodic database updates in hours: '1', '2', ...
    watch_user
        'true', 'false': monitor user activity and save changes immediately
    progressdialog
        'true', 'false': show a progress dialog during the update
    db_format
        '0' = SQLite File
        '1' = MYSQL Server
    extdb
        'true', 'false': Use external database file
    dbpath
        String: Specify path to external database file
    dbfilename
        String: name of the external database file
    dbbackup
        'true', 'false': create a zip backup of the database file
    mysql_server
    mysql_port
    mysql_db
    mysql_user
    mysql_pass
        MySQL connection settings (server address, port, database name, user, password)
"""
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
import re
import sys, os
import unicodedata
import time
import sqlite3
import mysql.connector
import buggalo
buggalo.GMAIL_RECIPIENT = "[email protected]"
# buggalo.SUBMIT_URL = 'http://msahadl.ms.funpic.de/buggalo-web/submit.php'
import resources.lib.utils as utils
if utils.getSetting('dbbackup') == 'true':
import zipfile
import datetime
#
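# Illustrative example (not part of the add-on code): one plausible settings
# combination for the options listed in the module docstring above, here a
# periodic sync every 4 hours against a shared MySQL server. These values are
# only shown for orientation; the real values come from the add-on settings
# dialog and are read via utils.getSetting().
_EXAMPLE_SETTINGS = {
    'w_movies': 'true', 'w_episodes': 'true',
    'autostart': 'true', 'starttype': '2', 'delay': '5', 'interval': '4',
    'watch_user': 'true', 'progressdialog': 'false',
    'db_format': '1', 'mysql_server': '192.168.0.10', 'mysql_port': '3306',
    'mysql_db': 'watchedlist', 'mysql_user': 'kodi', 'mysql_pass': 'secret',
}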
class WatchedList:
"""
Main class of the add-on
"""
def __init__(self):
"""
Initialize Class, default values for all class variables
"""
self.watchedmovielist_wl = list([]) # 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6lastChange
self.watchedepisodelist_wl = list([]) # 0imdbnumber, 1season, 2episode, 3lastplayed, 4playcount, 5empty, 6lastChange
self.watchedmovielist_xbmc = list([]) # 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6empty, 7movieid
self.watchedepisodelist_xbmc = list([]) # 0imdbnumber, 1season, 2episode, 3lastplayed, 4playcount, 5name, 6empty, 7episodeid
self.tvshows = {} # dict: key=xbmcid, value=[imdbnumber, showname]
self.tvshownames = {} #dict: key=imdbnumber, value=showname
self.sqlcon = 0
self.sqlcursor = 0
self.db_method = 'file' # either 'file' or 'mysql'
# flag to remember copying the databasefile if requested
self.dbcopydone = False
self.watch_user_changes_count = 0
# normal access of files or access over the xbmc virtual file system (on unix)
self.dbfileaccess = 'normal'
self.dbpath = ''
self.dbdirectory = ''
def runProgram(self):
"""Main function to call other functions
infinite loop for periodic database update
Returns:
return codes:
0 success
3 error/exit
"""
try:
# workaround to disable autostart, if requested
if utils.getSetting("autostart") == 'false':
return 0
utils.buggalo_extradata_settings()
utils.footprint()
# wait the delay time after startup
delaytime = float(utils.getSetting("delay")) * 60 # in seconds
utils.log(u'Delay time before execution: %d seconds' % delaytime, xbmc.LOGDEBUG)
utils.showNotification(utils.getString(32101), utils.getString(32004)%float(utils.getSetting("delay")))
if utils.sleepsafe(delaytime):
return 0
# load all databases
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
utils.showNotification(utils.getString(32102), utils.getString(32601))
return 3
if len(self.tvshownames) == 0: self.sync_tvshows()
if len(self.watchedmovielist_wl) == 0: self.get_watched_wl(1)
if len(self.watchedmovielist_xbmc) == 0: self.get_watched_xbmc(1)
executioncount = 0
idletime = 0
if utils.getSetting("watch_user") == 'true': utils.showNotification(utils.getString(32101), utils.getString(32005))
# handle the periodic execution
while float(utils.getSetting("starttype")) > 0 or utils.getSetting("watch_user") == 'true':
starttime = time.time()
# determine sleeptime before next full watched-database update
if utils.getSetting("starttype") == '1' and executioncount == 0: # one execution after startup
sleeptime = 0
elif utils.getSetting("starttype") == '2': # periodic execution
if executioncount == 0: # with periodic execution, one update after startup and then periodic
sleeptime = 0
else:
sleeptime = float(utils.getSetting("interval")) * 3600 # wait interval until next startup in [seconds]
# wait and then update again
utils.log(u'wait %d seconds until next update' % sleeptime)
utils.showNotification(utils.getString(32101), utils.getString(32003)%(sleeptime/3600))
else: # no autostart, only watch user
sleeptime = 3600 # arbitrary time for infinite loop
# workaround to sleep the requested time. When using the sleep-function, xbmc can not exit
while 1:
if xbmc.abortRequested: return 1
# check if user changes arrived
if utils.getSetting("watch_user") == 'true':
idletime_old = idletime
idletime = xbmc.getGlobalIdleTime() # xbmc idletime in seconds
# check if user could have made changes and process these changes to the wl database
self.watch_user_changes(idletime_old, idletime)
# check if time for update arrived
if time.time() > starttime + sleeptime:
break
xbmc.sleep(1000) # wait 1 second until next check if xbmc terminates
# perform full update
if utils.getSetting("starttype") == '1' and executioncount == 0 or utils.getSetting("starttype") == '2':
self.runUpdate(False)
executioncount += 1
# check for exiting program
if float(utils.getSetting("starttype")) < 2 and utils.getSetting("watch_user") == 'false':
return 0 # the program may exit. No purpose for background process
return 0
except:
buggalo.onExceptionRaised()
def runUpdate(self, manualstart):
"""entry point for manual start.
perform the update step by step
Args:
manualstart: True if called manually
Returns:
return code:
0 success
3 Error opening database
4 Error getting watched state from addon database
5 Error getting watched state from xbmc database
6 Error writing WL Database
7 Error writing XBMC database
"""
try:
utils.buggalo_extradata_settings()
# check if player is running before doing the update. Only do this check for automatic start
while xbmc.Player().isPlaying() == True and not manualstart:
if utils.sleepsafe(60*1000): return 1 # wait one minute until next check for active playback
if xbmc.Player().isPlaying() == False:
if utils.sleepsafe(180*1000): return 1 # wait 3 minutes so the dialogue does not pop up directly after the playback ends
# load the addon-database
if self.load_db(True): # True: Manual start
utils.showNotification(utils.getString(32102), utils.getString(32601))
return 3
if self.sync_tvshows():
utils.showNotification(utils.getString(32102), utils.getString(32604))
return 5
# get the watched state from the addon
if self.get_watched_wl(0):
utils.showNotification(utils.getString(32102), utils.getString(32602))
return 4
# get watched state from xbmc
if self.get_watched_xbmc(0):
utils.showNotification(utils.getString(32102), utils.getString(32603))
return 5
if self.sync_tvshows():
utils.showNotification(utils.getString(32102), utils.getString(32604))
return 5
# import from xbmc into addon database
res = self.write_wl_wdata()
if res == 2: # user exit
return 0
elif res == 1: # error
utils.showNotification(utils.getString(32102), utils.getString(32604))
return 6
# close the sqlite database (addon)
self.close_db() # should be closed by the functions directly accessing the database
# export from addon database into xbmc database
res = self.write_xbmc_wdata((utils.getSetting("progressdialog") == 'true'), 2)
if res == 2: # user exit
return 0
elif res == 1: # error
utils.showNotification(utils.getString(32102), utils.getString(32605))
return 7
utils.showNotification(utils.getString(32101), utils.getString(32107))
utils.log(u'runUpdate exited with success', xbmc.LOGDEBUG)
return 0
except:
buggalo.onExceptionRaised()
def load_db(self, manualstart=False):
"""Load WL database
Args:
manualstart: True if called manually; only retry opening db once
Returns:
return code:
0 successfully opened database
1 error
2 shutdown (serious error in subfunction)
"""
try:
if utils.getSetting("db_format") == '0':
# SQlite3 database in a file
# load the db path
if utils.getSetting("extdb") == 'false':
# use the default file
self.dbdirectory = xbmc.translatePath( utils.data_dir() ).decode('utf-8')
buggalo.addExtraData('dbdirectory', self.dbdirectory);
self.dbpath = os.path.join( self.dbdirectory , "watchedlist.db" )
else:
wait_minutes = 1 # retry waittime if db path does not exist/ is offline
while xbmc.abortRequested == False:
# use a user specified file, for example to synchronize multiple clients
self.dbdirectory = xbmc.translatePath( utils.getSetting("dbpath") ).decode('utf-8')
self.dbfileaccess = utils.fileaccessmode(self.dbdirectory)
self.dbdirectory = utils.translateSMB(self.dbdirectory)
self.dbpath = os.path.join( self.dbdirectory , utils.getSetting("dbfilename").decode('utf-8') )
# xbmc.validatePath(self.dbdirectory) # does not work for smb
if not xbmcvfs.exists(self.dbdirectory): # do not use os.path.exists to access smb:// paths
if manualstart:
utils.log(u'db path does not exist: %s' % self.dbdirectory, xbmc.LOGWARNING)
return 1 # error
else:
utils.log(u'db path does not exist, wait %d minutes: %s' % (wait_minutes, self.dbdirectory), xbmc.LOGWARNING)
utils.showNotification(utils.getString(32102), utils.getString(32002) % self.dbdirectory )
# Wait "wait_minutes" minutes until next check for file path (necessary on network shares, that are offline)
wait_minutes += wait_minutes # increase waittime until next check
if utils.sleepsafe(wait_minutes*60): return 2
else:
break # directory exists, continue below
# on unix, smb-shares can not be accessed with sqlite3 --> copy the db with xbmc file system operations and work in mirror directory
buggalo.addExtraData('dbfileaccess', self.dbfileaccess);
buggalo.addExtraData('dbdirectory', self.dbdirectory);
buggalo.addExtraData('dbpath', self.dbpath);
if self.dbfileaccess == 'copy':
self.dbdirectory_copy = self.dbdirectory
self.dbpath_copy = self.dbpath # path to db file as in the settings (but not accessable)
buggalo.addExtraData('dbdirectory_copy', self.dbdirectory_copy);
buggalo.addExtraData('dbpath_copy', self.dbpath_copy);
# work in copy directory in the xbmc profile folder
self.dbdirectory = os.path.join( xbmc.translatePath( utils.data_dir() ).decode('utf-8'), 'dbcopy')
if not xbmcvfs.exists(self.dbdirectory):
xbmcvfs.mkdir(self.dbdirectory)
utils.log(u'created directory %s' % str(self.dbdirectory))
self.dbpath = os.path.join( self.dbdirectory , "watchedlist.db" )
if xbmcvfs.exists(self.dbpath_copy):
success = xbmcvfs.copy(self.dbpath_copy, self.dbpath) # copy the external db file to local mirror directory
utils.log(u'copied db file %s -> %s. Success: %d' % (self.dbpath_copy, self.dbpath, success), xbmc.LOGDEBUG)
buggalo.addExtraData('dbdirectory', self.dbdirectory);
buggalo.addExtraData('dbpath', self.dbpath);
#connect to the database. create database if it does not exist
self.sqlcon = sqlite3.connect(self.dbpath);
self.sqlcursor = self.sqlcon.cursor()
else:
# MySQL Database on a server
self.sqlcon = mysql.connector.connect(user=utils.getSetting("mysql_user"), password=utils.getSetting("mysql_pass"), database=utils.getSetting("mysql_db"), host=utils.getSetting("mysql_server"), port=utils.getSetting("mysql_port"))
self.sqlcursor = self.sqlcon.cursor()
# create tables if they don't exist
if utils.getSetting("db_format") == '0': # sqlite file
sql = "CREATE TABLE IF NOT EXISTS movie_watched (idMovieImdb INTEGER PRIMARY KEY,playCount INTEGER,lastChange INTEGER,lastPlayed INTEGER,title TEXT)"
self.sqlcursor.execute(sql)
sql = "CREATE TABLE IF NOT EXISTS episode_watched (idShow INTEGER, season INTEGER, episode INTEGER, playCount INTEGER,lastChange INTEGER,lastPlayed INTEGER, PRIMARY KEY (idShow, season, episode))"
self.sqlcursor.execute(sql)
sql = "CREATE TABLE IF NOT EXISTS tvshows (idShow INTEGER, title TEXT, PRIMARY KEY (idShow))"
self.sqlcursor.execute(sql)
else: # mysql network database
sql = ("CREATE TABLE IF NOT EXISTS `movie_watched` ("
"`idMovieImdb` int unsigned NOT NULL,"
"`playCount` tinyint unsigned DEFAULT NULL,"
"`lastChange` timestamp NULL DEFAULT NULL,"
"`lastPlayed` timestamp NULL DEFAULT NULL,"
"`title` text,"
"PRIMARY KEY (`idMovieImdb`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8;")
self.sqlcursor.execute(sql)
sql = ("CREATE TABLE IF NOT EXISTS `episode_watched` ("
"`idShow` int unsigned NOT NULL DEFAULT '0',"
"`season` smallint unsigned NOT NULL DEFAULT '0',"
"`episode` smallint unsigned NOT NULL DEFAULT '0',"
"`playCount` tinyint unsigned DEFAULT NULL,"
"`lastChange` timestamp NULL DEFAULT NULL,"
"`lastPlayed` timestamp NULL DEFAULT NULL,"
"PRIMARY KEY (`idShow`,`season`,`episode`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8;")
self.sqlcursor.execute(sql)
sql = ("CREATE TABLE IF NOT EXISTS `tvshows` ("
"`idShow` int unsigned NOT NULL,"
"`title` text,"
"PRIMARY KEY (`idShow`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8;")
self.sqlcursor.execute(sql)
buggalo.addExtraData('db_connstatus', 'connected')
except sqlite3.Error as e:
try:
                errstring = e.args[0] # TODO: Find out why this does not work sometimes
except:
errstring = ''
utils.log(u"Database error while opening %s. '%s'" % (self.dbpath, errstring), xbmc.LOGERROR)
self.close_db()
buggalo.addExtraData('db_connstatus', 'sqlite3 error, closed')
return 1
except mysql.connector.Error as err:
# Catch common mysql errors and show them to guide the user
utils.log(u"Database error while opening mySQL DB %s [%s:%s@%s]. %s" % (utils.getSetting("mysql_db"), utils.getSetting("mysql_user"), utils.getSetting("mysql_pass"), utils.getSetting("mysql_db"), err), xbmc.LOGERROR)
if err.errno == mysql.connector.errorcode.ER_DBACCESS_DENIED_ERROR:
utils.showNotification(utils.getString(32103), utils.getString(32210) % (utils.getSetting("mysql_user"), utils.getSetting("mysql_db")))
elif err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
utils.showNotification(utils.getString(32103), utils.getString(32208))
elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
utils.showNotification(utils.getString(32103), utils.getString(32209) % utils.getSetting("mysql_db") )
buggalo.addExtraData('db_connstatus', 'mysql error, closed')
self.close_db()
return 1
except:
utils.log(u"Error while opening %s: %s" % (self.dbpath, sys.exc_info()[2]), xbmc.LOGERROR)
self.close_db()
buggalo.addExtraData('dbpath', self.dbpath)
buggalo.addExtraData('db_connstatus', 'error, closed')
buggalo.onExceptionRaised()
return 1
        # only commit the changes if no error occurred to ensure database persistence
self.sqlcon.commit()
return 0
def close_db(self):
"""Close WL database
Returns:
return code:
0 successfully closed database
1 error
"""
if self.sqlcon:
self.sqlcon.close()
self.sqlcon = 0
# copy the db file back to the shared directory, if needed
if utils.getSetting("db_format") == '0' and self.dbfileaccess == 'copy':
if xbmcvfs.exists(self.dbpath):
success = xbmcvfs.copy(self.dbpath, self.dbpath_copy)
utils.log(u'copied db file %s -> %s. Success: %d' % (self.dbpath, self.dbpath_copy, success), xbmc.LOGDEBUG)
if not success:
utils.showNotification(utils.getString(32102), utils.getString(32606) % self.dbpath )
return 1
buggalo.addExtraData('db_connstatus', 'closed')
return 0
# cursor is not changed -> error
def get_watched_xbmc(self, silent):
"""Get Watched States of XBMC Database
Args:
silent: Do not show notifications if True
Returns:
return code:
0 success
1 error
"""
try:
############################################
# first tv shows with TheTVDB-ID, then tv episodes
if utils.getSetting("w_episodes") == 'true':
############################################
# get imdb tv-show id from xbmc database
utils.log(u'get_watched_xbmc: Get all episodes from xbmc database', xbmc.LOGDEBUG)
json_response = utils.executeJSON({
"jsonrpc": "2.0",
"method": "VideoLibrary.GetTVShows",
"params": {
"properties": ["title", "imdbnumber"],
"sort": { "order": "ascending", "method": "title" }
},
"id": 1})
if json_response.has_key('result') and json_response['result'] != None and json_response['result'].has_key('tvshows'):
for item in json_response['result']['tvshows']:
tvshowId_xbmc = int(item['tvshowid'])
try:
# check if series number is in imdb-format (scraper=imdb?)
res = re.compile('tt(\d+)').findall(item['imdbnumber'])
if len(res) == 0:
# number in thetvdb-format
tvshowId_imdb = int(item['imdbnumber'])
else:
# number in imdb-format
tvshowId_imdb = int(res[0])
except:
utils.log(u'get_watched_xbmc: tv show "%s" has no imdb-number in database. tvshowid=%d Try rescraping.' % (item['title'], tvshowId_xbmc), xbmc.LOGDEBUG)
continue
self.tvshows[tvshowId_xbmc] = list([tvshowId_imdb, item['title']])
self.tvshownames[tvshowId_imdb] = item['title']
# Get all watched movies and episodes by unique id from xbmc-database via JSONRPC
self.watchedmovielist_xbmc = list([])
self.watchedepisodelist_xbmc = list([])
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
utils.log(u'get_watched_xbmc: Get all %ss from xbmc database' % modus, xbmc.LOGDEBUG)
if modus == 'movie':
# use the JSON-RPC to access the xbmc-database.
json_response = utils.executeJSON({
"jsonrpc": "2.0",
"method": "VideoLibrary.GetMovies",
"params": {
"properties": ["title", "year", "imdbnumber", "lastplayed", "playcount"],
"sort": { "order": "ascending", "method": "title" }
},
"id": 1
})
else:
json_response = utils.executeJSON({
"jsonrpc": "2.0",
"method": "VideoLibrary.GetEpisodes",
"params": {
"properties": ["tvshowid", "season", "episode", "playcount", "showtitle", "lastplayed"]
},
"id": 1
})
if modus == 'movie': searchkey = 'movies'
else: searchkey = 'episodes'
if json_response.has_key('result') and json_response['result'] != None and json_response['result'].has_key(searchkey):
# go through all watched movies and save them in the class-variable self.watchedmovielist_xbmc
for item in json_response['result'][searchkey]:
if modus == 'movie':
name = item['title'] + ' (' + str(item['year']) + ')'
res = re.compile('tt(\d+)').findall(item['imdbnumber'])
if len(res) == 0:
# no imdb-number for this movie in database. Skip
utils.log(u'get_watched_xbmc: Movie %s has no imdb-number in database. movieid=%d Try rescraping' % (name, int(item['movieid'])), xbmc.LOGDEBUG)
continue
imdbId = int(res[0])
else: # episodes
tvshowId_xbmc = item['tvshowid']
name = '%s S%02dE%02d' % (item['showtitle'], item['season'], item['episode'])
try:
tvshowId_imdb = self.tvshows[tvshowId_xbmc][0]
except:
utils.log(u'get_watched_xbmc: xbmc tv showid %d is not in table xbmc-tvshows. Skipping %s' % (item['tvshowid'], name), xbmc.LOGWARNING)
continue
lastplayed = utils.sqlDateTimeToTimeStamp(item['lastplayed']) # convert to integer-timestamp
playcount = int(item['playcount'])
# add data to the class-variables
if modus == 'movie':
self.watchedmovielist_xbmc.append(list([imdbId, 0, 0, lastplayed, playcount, name, 0, int(item['movieid'])]))# 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6empty, 7movieid
else:
self.watchedepisodelist_xbmc.append(list([tvshowId_imdb, int(item['season']), int(item['episode']), lastplayed, playcount, name, 0, int(item['episodeid'])]))
if not silent: utils.showNotification( utils.getString(32101), utils.getString(32299)%(len(self.watchedmovielist_xbmc), len(self.watchedepisodelist_xbmc)) )
return 0
except:
utils.log(u'get_watched_xbmc: error getting the xbmc database : %s' % sys.exc_info()[2], xbmc.LOGERROR)
self.close_db()
buggalo.onExceptionRaised()
return 1
def get_watched_wl(self, silent):
"""Get Watched States of WL Database
Args:
silent: Do not show notifications if True
Returns:
return code:
0 successfully got watched states from WL-database
1 unknown error (programming related)
2 shutdown (error in subfunction)
3 error related to opening the database
"""
try:
buggalo.addExtraData('self_sqlcursor', self.sqlcursor); buggalo.addExtraData('self_sqlcon', self.sqlcon);
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
# get watched movies from addon database
self.watchedmovielist_wl = list([])
if utils.getSetting("w_movies") == 'true':
utils.log(u'get_watched_wl: Get watched movies from WL database', xbmc.LOGDEBUG)
if utils.getSetting("db_format") == '0': # SQLite3 File. Timestamp stored as integer
self.sqlcursor.execute("SELECT idMovieImdb, lastPlayed, playCount, title, lastChange FROM movie_watched ORDER BY title")
else: # mySQL: Create integer timestamp with the request
self.sqlcursor.execute("SELECT `idMovieImdb`, UNIX_TIMESTAMP(`lastPlayed`), `playCount`, `title`, UNIX_TIMESTAMP(`lastChange`) FROM `movie_watched` ORDER BY `title`")
rows = self.sqlcursor.fetchall()
for row in rows:
self.watchedmovielist_wl.append(list([int(row[0]), 0, 0, int(row[1]), int(row[2]), row[3], int(row[4])])) # 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6lastChange
# get watched episodes from addon database
self.watchedepisodelist_wl = list([])
if utils.getSetting("w_episodes") == 'true':
utils.log(u'get_watched_wl: Get watched episodes from WL database', xbmc.LOGDEBUG)
if utils.getSetting("db_format") == '0': # SQLite3 File. Timestamp stored as integer
self.sqlcursor.execute("SELECT idShow, season, episode, lastPlayed, playCount, lastChange FROM episode_watched ORDER BY idShow, season, episode")
else: # mySQL: Create integer timestamp with the request
self.sqlcursor.execute("SELECT `idShow`, `season`, `episode`, UNIX_TIMESTAMP(`lastPlayed`), `playCount`, UNIX_TIMESTAMP(`lastChange`) FROM `episode_watched` ORDER BY `idShow`, `season`, `episode`")
rows = self.sqlcursor.fetchall()
for row in rows:
try:
name = '%s S%02dE%02d' % (self.tvshownames[int(row[0])], int(row[1]), int(row[2]))
except:
name = 'tvdb-id %d S%02dE%02d' % (int(row[0]), int(row[1]), int(row[2]))
self.watchedepisodelist_wl.append(list([int(row[0]), int(row[1]), int(row[2]), int(row[3]), int(row[4]), name, int(row[5])]))# 0imdbnumber, 1season, 2episode, 3lastplayed, 4playcount, 5name, 6lastChange
if not silent: utils.showNotification(utils.getString(32101), utils.getString(32298)%(len(self.watchedmovielist_wl), len(self.watchedepisodelist_wl)))
self.close_db()
return 0
except sqlite3.Error as e:
try:
errstring = e.args[0] # TODO: Find out, why this does not work some times
except:
errstring = ''
utils.log(u'get_watched_wl: SQLite Database error getting the wl database. %s' % errstring, xbmc.LOGERROR)
self.close_db()
# error could be that the database is locked (for tv show strings). This is not an error to disturb the other functions
return 3
except mysql.connector.Error as err:
utils.log(u'get_watched_wl: MySQL Database error getting the wl database. %s' % err, xbmc.LOGERROR)
return 3
except:
utils.log(u'get_watched_wl: Error getting the wl database : %s' % sys.exc_info()[2], xbmc.LOGERROR)
self.close_db()
buggalo.onExceptionRaised()
return 1
def sync_tvshows(self):
"""Sync List of TV Shows between WL and XBMC Database
Returns:
return code:
0 successfully synched tv shows
1 database access error
2 database loading error
"""
try:
utils.log(u'sync_tvshows: sync tvshows with wl database : %s' % sys.exc_info()[2], xbmc.LOGDEBUG)
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
# write eventually new tv shows to wl database
for xbmcid in self.tvshows:
if utils.getSetting("db_format") == '0': # sqlite3
sql = "INSERT OR IGNORE INTO tvshows (idShow,title) VALUES (?, ?)"
else: # mysql
sql = "INSERT IGNORE INTO tvshows (idShow,title) VALUES (%s, %s)"
values = self.tvshows[xbmcid]
self.sqlcursor.execute(sql, values)
self.database_copy()
self.sqlcon.commit()
# get all known tv shows from wl database
self.sqlcursor.execute("SELECT idShow, title FROM tvshows")
rows = self.sqlcursor.fetchall()
for i in range(len(rows)):
self.tvshownames[int(rows[i][0])] = rows[i][1]
self.close_db()
except sqlite3.Error as e:
try:
errstring = e.args[0] # TODO: Find out, why this does not work some times
except:
errstring = ''
utils.log(u'sync_tvshows: SQLite Database error accessing the wl database: ''%s''' % errstring, xbmc.LOGERROR)
self.close_db()
# error could be that the database is locked (for tv show strings).
return 1
except mysql.connector.Error as err:
utils.log(u"sync_tvshows: MySQL Database error accessing the wl database: ''%s''" % (err), xbmc.LOGERROR)
self.close_db()
return 1
except:
utils.log(u'sync_tvshows: Error getting the wl database: ''%s''' % sys.exc_info()[2], xbmc.LOGERROR)
self.close_db()
buggalo.onExceptionRaised()
return 1
return 0
def write_wl_wdata(self):
"""Go through all watched movies from xbmc and check whether they are up to date in the addon database
Returns:
return code:
0 successfully written WL
1 program exception
2 database loading error
"""
buggalo.addExtraData('self_sqlcursor', self.sqlcursor); buggalo.addExtraData('self_sqlcon', self.sqlcon);
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
utils.log(u'write_wl_wdata: Write watched %ss to WL database' % modus, xbmc.LOGDEBUG)
count_insert = 0
count_update = 0
if utils.getSetting("progressdialog") == 'true':
DIALOG_PROGRESS = xbmcgui.DialogProgress()
DIALOG_PROGRESS.create( utils.getString(32101) , utils.getString(32105))
if modus == 'movie':
list_length = len(self.watchedmovielist_xbmc)
else:
list_length = len(self.watchedepisodelist_xbmc)
for i in range(list_length):
if xbmc.abortRequested: break # this loop can take some time in debug mode and prevents xbmc exit
if utils.getSetting("progressdialog") == 'true' and DIALOG_PROGRESS.iscanceled():
if modus == 'movie': strno = 32202
else: strno = 32203;
utils.showNotification(utils.getString(strno), utils.getString(32301)%(count_insert, count_update))
return 2
if modus == 'movie':
row_xbmc = self.watchedmovielist_xbmc[i]
else:
row_xbmc = self.watchedepisodelist_xbmc[i]
if utils.getSetting("progressdialog") == 'true':
DIALOG_PROGRESS.update(100*(i+1)/list_length, utils.getString(32105), utils.getString(32610) % (i+1, list_length, row_xbmc[5]) )
try:
count = self.wl_update_media(modus, row_xbmc, 0, 0)
count_insert += count[0]; count_update += count[1];
except sqlite3.Error as e:
try:
errstring = e.args[0] # TODO: Find out, why this does not work some times
except:
errstring = ''
utils.log(u'write_wl_wdata: SQLite Database error ''%s'' while updating %s %s' % (errstring, modus, row_xbmc[5]), xbmc.LOGERROR)
# error at this place is the result of duplicate movies, which produces a DUPLICATE PRIMARY KEY ERROR
return 1
except mysql.connector.Error as err:
utils.log(u'write_wl_wdata: MySQL Database error ''%s'' while updating %s %s' % (err, modus, row_xbmc[5]), xbmc.LOGERROR)
self.close_db()
return 1 # error while writing. Do not continue with episodes, if movies raised an exception
except:
utils.log(u'write_wl_wdata: Error while updating %s %s: %s' % (modus, row_xbmc[5], sys.exc_info()[2]), xbmc.LOGERROR)
self.close_db()
if utils.getSetting("progressdialog") == 'true': DIALOG_PROGRESS.close()
buggalo.addExtraData('count_update', count_update); buggalo.addExtraData('count_insert', count_insert);
buggalo.onExceptionRaised()
return 1
if utils.getSetting("progressdialog") == 'true': DIALOG_PROGRESS.close()
            # only commit the changes if no error occurred to ensure database persistence
if count_insert > 0 or count_update > 0:
self.database_copy()
self.sqlcon.commit()
if modus == 'movie': strno = [32202, 32301]
else: strno = [32203, 32301];
utils.showNotification(utils.getString(strno[0]), utils.getString(strno[1])%(count_insert, count_update))
self.close_db()
return 0
def write_xbmc_wdata(self, progressdialogue, notifications):
"""Go through all watched movies/episodes from the wl-database and check,
if the xbmc-database is up to date
Args:
progressdialogue: Show Progress Bar if True
notifications: 0= no, 1=only changed info, 2=all
Returns:
return code:
0 successfully written XBMC database
1 program exception
2 cancel by user interaction
"""
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
utils.log(u'write_xbmc_wdata: Write watched %ss to xbmc database (pd=%d, noti=%d)' % (modus, progressdialogue, notifications), xbmc.LOGDEBUG)
count_update = 0
if progressdialogue:
DIALOG_PROGRESS = xbmcgui.DialogProgress()
DIALOG_PROGRESS.create( utils.getString(32101), utils.getString(32106))
# list to iterate over
if modus == 'movie':
list_length = len(self.watchedmovielist_wl)
else:
list_length = len(self.watchedepisodelist_wl)
# iterate over wl-list
for j in range(list_length):
if xbmc.abortRequested: break # this loop can take some time in debug mode and prevents xbmc exit
if progressdialogue and DIALOG_PROGRESS.iscanceled():
if notifications > 0: utils.showNotification(utils.getString(32204), utils.getString(32302)%(count_update))
return 2
# get media-specific list items
if modus == 'movie':
row_wl = self.watchedmovielist_wl[j]
else:
row_wl = self.watchedepisodelist_wl[j]
season = row_wl[1]
episode = row_wl[2]
imdbId = row_wl[0]
name = row_wl[5]
if progressdialogue:
DIALOG_PROGRESS.update(100*(j+1)/list_length, utils.getString(32106), utils.getString(32610) % (j+1, list_length, name) )
try:
# search the unique movie/episode id in the xbmc-list
if modus == 'movie':
indices = [i for i, x in enumerate(self.watchedmovielist_xbmc) if x[0] == imdbId] # the movie can have multiple entries in xbmc
else:
indices = [i for i, x in enumerate(self.watchedepisodelist_xbmc) if x[0] == imdbId and x[1] == season and x[2] == episode]
lastplayed_wl = row_wl[3]
playcount_wl = row_wl[4]
lastchange_wl = row_wl[6]
if len(indices) > 0:
# the movie/episode is already in the xbmc-list
for i in indices:
if modus == 'movie':
row_xbmc = self.watchedmovielist_xbmc[i]
else:
row_xbmc = self.watchedepisodelist_xbmc[i]
lastplayed_xbmc = row_xbmc[3]
playcount_xbmc = row_xbmc[4]
change_xbmc_db = False
# check if movie/episode is set as unwatched in the wl database
if playcount_wl != playcount_xbmc and lastchange_wl > lastplayed_xbmc:
change_xbmc_db = True
# compare playcount and lastplayed (update if xbmc data is older)
if playcount_xbmc < playcount_wl or (lastplayed_xbmc < lastplayed_wl and playcount_wl > 0):
change_xbmc_db = True
if not change_xbmc_db:
if utils.getSetting("debug") == 'true':
# utils.log(u'write_xbmc_wdata: xbmc database up-to-date for tt%d, %s' % (imdbId, row_xbmc[2]), xbmc.LOGDEBUG)
pass
continue
# check if the lastplayed-timestamp in wl is useful
if playcount_wl == 0:
lastplayed_new = 0
else:
if lastplayed_wl == 0:
lastplayed_new = lastplayed_xbmc
else:
lastplayed_new = lastplayed_wl
# update database
mediaid = row_xbmc[7]
if modus == 'movie': jsonmethod = "VideoLibrary.SetMovieDetails"; idfieldname = "movieid"
else: jsonmethod = "VideoLibrary.SetEpisodeDetails"; idfieldname = "episodeid"
jsondict = {
"jsonrpc": "2.0",
"method": jsonmethod,
"params": {idfieldname: mediaid, "playcount": playcount_wl, "lastplayed": utils.TimeStamptosqlDateTime(lastplayed_new)},
"id": 1
}
json_response = utils.executeJSON(jsondict)
if (json_response.has_key('result') and json_response['result'] == 'OK'):
utils.log(u'write_xbmc_wdata: xbmc database updated for %s. playcount: {%d -> %d}, lastplayed: {"%s" -> "%s"} (%sid=%d)' % (name, playcount_xbmc, playcount_wl, utils.TimeStamptosqlDateTime(lastplayed_xbmc), utils.TimeStamptosqlDateTime(lastplayed_new), modus, mediaid), xbmc.LOGINFO)
if utils.getSetting("debug") == 'true':
if playcount_wl == 0:
if notifications > 0: utils.showNotification(utils.getString(32404), name)
else:
if notifications > 0: utils.showNotification(utils.getString(32401), name)
count_update += 1
# update the xbmc-db-mirror-variable
if modus == 'movie':
self.watchedmovielist_xbmc[i][3] = lastplayed_new
self.watchedmovielist_xbmc[i][4] = playcount_wl
else:
self.watchedepisodelist_xbmc[i][3] = lastplayed_new
self.watchedepisodelist_xbmc[i][4] = playcount_wl
else:
utils.log(u'write_xbmc_wdata: error updating xbmc database. %s. json_response=%s' % (name, str(json_response)), xbmc.LOGERROR)
else:
# the movie is in the watched-list but not in the xbmc-list -> no action
# utils.log(u'write_xbmc_wdata: movie not in xbmc database: tt%d, %s' % (imdbId, row_xbmc[2]), xbmc.LOGDEBUG)
continue
except:
utils.log(u"write_xbmc_wdata: Error while updating %s %s: %s" % (modus, name, sys.exc_info()[2]), xbmc.LOGERROR)
if progressdialogue: DIALOG_PROGRESS.close()
buggalo.addExtraData('count_update', count_update);
buggalo.onExceptionRaised()
return 1
if progressdialogue: DIALOG_PROGRESS.close()
if notifications > 1:
if modus == 'movie': strno = [32204, 32302]
else: strno = [32205, 32303];
utils.showNotification(utils.getString(strno[0]), utils.getString(strno[1])%(count_update))
return 0
def database_copy(self):
"""create a copy of the database, in case something goes wrong (only if database file is used)
Returns:
return code:
0 successfully copied database
1 file writing error
2 program exception
"""
if utils.getSetting("db_format") != '0':
return 0 # no backup needed since we are using mysql database
if utils.getSetting('dbbackup') == 'false':
return 0 # no backup requested in the addon settings
if not self.dbcopydone:
if not xbmcvfs.exists(self.dbpath):
utils.log(u'database_copy: directory %s does not exist. No backup possible.' % self.dbpath, xbmc.LOGERROR)
return 1
now = datetime.datetime.now()
timestr = u'%04d%02d%02d_%02d%02d%02d' % (now.year, now.month, now.day, now.hour, now.minute, now.second)
zipfilename = os.path.join(self.dbdirectory, utils.decode(timestr + u'-watchedlist.db.zip'))
zf = False
try:
zf = zipfile.ZipFile(zipfilename, 'w')
zf.write(self.dbpath, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
self.dbcopydone = True
utils.log(u'database_copy: database backup copy created to %s' % zipfilename, xbmc.LOGINFO)
# copy the zip file with xbmc file system, if needed
if self.dbfileaccess == 'copy':
xbmcvfs.copy(zipfilename, os.path.join(self.dbdirectory_copy, utils.decode(timestr + u'-watchedlist.db.zip')))
xbmcvfs.delete(zipfilename)
return 0
except:
if zf:
zf.close()
buggalo.addExtraData('zipfilename', zipfilename);
buggalo.onExceptionRaised()
return 2
def watch_user_changes(self, idletime_old, idletime):
"""check if the user made changes in the watched states. Especially setting movies as "not watched".
This can not be recognized by the other functions
Args:
idletime_old: Old Idle Time
idletime: New Idle Time
"""
if xbmc.Player().isPlaying() == True:
return
if idletime > idletime_old:
# the idle time increased. No user interaction probably happened
return
utils.log(u'watch_user_changes: Check for user changes (no. %d)' % self.watch_user_changes_count, xbmc.LOGDEBUG)
self.watch_user_changes_count = self.watch_user_changes_count + 1
# save previous state
old_watchedmovielist_xbmc = self.watchedmovielist_xbmc
old_watchedepisodelist_xbmc = self.watchedepisodelist_xbmc
# get new state
self.get_watched_xbmc(1)
#save exception information
buggalo.addExtraData('len_old_watchedmovielist_xbmc', len(old_watchedmovielist_xbmc))
buggalo.addExtraData('len_old_watchedepisodelist_xbmc', len(old_watchedepisodelist_xbmc))
buggalo.addExtraData('len_self_watchedmovielist_xbmc', len(self.watchedmovielist_xbmc))
buggalo.addExtraData('len_self_watchedepisodelist_xbmc', len(self.watchedepisodelist_xbmc))
# separate the change detection and the change in the database to prevent circle reference
indices_changed = list([])
# compare states of movies/episodes
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
if modus == 'movie':
list_new = self.watchedmovielist_xbmc
list_old = old_watchedmovielist_xbmc
else:
list_new = self.watchedepisodelist_xbmc
list_old = old_watchedepisodelist_xbmc
if len(list_old) == 0 or len(list_new) == 0:
                # one of the lists is empty: nothing to compare. No user changes noticeable
continue
for i_n, row_xbmc in enumerate(list_new):
if xbmc.abortRequested: return
mediaid = row_xbmc[7]
lastplayed_new = row_xbmc[3]
playcount_new = row_xbmc[4]
# index of this movie/episode in the old database (before the change by the user)
if (len(list_old) > i_n) and (list_old[i_n][7] == mediaid): i_o = i_n # db did not change
else: # search the movieid
i_o = [i for i, x in enumerate(list_old) if x[7] == mediaid]
if len(i_o) == 0: continue #movie is not in old array
i_o = i_o[0] # convert list to int
lastplayed_old = list_old[i_o][3]
playcount_old = list_old[i_o][4]
if playcount_new != playcount_old or lastplayed_new != lastplayed_old:
if playcount_new == playcount_old and playcount_new == 0:
                        continue # do not add lastplayed to database when playcount = 0
# The user changed the playcount or lastplayed.
# update wl with new watched state
indices_changed.append([i_n, i_o, row_xbmc])
# go through all movies changed by the user
for icx in indices_changed:
if xbmc.abortRequested: return 1
i_o = icx[1]; row_xbmc = icx[2]
i_n = icx[0];
lastplayed_old = list_old[i_o][3]; playcount_old = list_old[i_o][4];
lastplayed_new = row_xbmc[3]; playcount_new = row_xbmc[4]; mediaid = row_xbmc[7]
utils.log(u'watch_user_changes: %s "%s" changed playcount {%d -> %d} lastplayed {"%s" -> "%s"}. %sid=%d' % (modus, row_xbmc[5], playcount_old, playcount_new, utils.TimeStamptosqlDateTime(lastplayed_old), utils.TimeStamptosqlDateTime(lastplayed_new), modus, mediaid))
try:
self.wl_update_media(modus, row_xbmc, 1, 1)
except sqlite3.Error as e:
try:
errstring = e.args[0] # TODO: Find out, why this does not work some times
except:
errstring = ''
utils.log(u'write_wl_wdata: SQLite Database error (%s) while updating %s %s' % (errstring, modus, row_xbmc[5]))
if utils.getSetting("debug") == 'true':
utils.showNotification(utils.getString(32102), utils.getString(32606) % ('(%s)' % errstring))
# error because of db locked or similar error
self.close_db()
break
except mysql.connector.Error as err:
# Catch common mysql errors and show them to guide the user
utils.log(u'write_wl_wdata: MySQL Database error (%s) while updating %s %s' % (err, modus, row_xbmc[5]))
if utils.getSetting("debug") == 'true':
utils.showNotification(utils.getString(32102), utils.getString(32606) % ('(%s)' % err))
self.close_db()
break
# update xbmc watched status, e.g. to set duplicate movies also as watched
if len(indices_changed) > 0:
self.write_xbmc_wdata(0, 1) # this changes self.watchedmovielist_xbmc
self.close_db() # keep the db closed most of the time (no access problems)
def wl_update_media(self, mediatype, row_xbmc, saveanyway, commit):
"""update the wl database for one movie/episode with the information in row_xbmc.
Args:
mediatype: 'episode' or 'movie'
row_xbmc: One row of the xbmc media table self.watchedmovielist_xbmc.
saveanyway: Skip checks whether not to save the changes
commit: The db change is committed directly (slow with many movies, but safe)
Returns:
return code:
2 error loading database
count_return:
                list with 2 entries: [number of inserted rows, number of updated rows]
"""
buggalo.addExtraData('self_sqlcursor', self.sqlcursor); buggalo.addExtraData('self_sqlcon', self.sqlcon);
buggalo.addExtraData('len_self_watchedmovielist_wl', len(self.watchedmovielist_wl))
buggalo.addExtraData('len_self_watchedepisodelist_wl', len(self.watchedepisodelist_wl))
buggalo.addExtraData('len_self_tvshownames', len(self.tvshownames))
buggalo.addExtraData('row_xbmc', row_xbmc)
buggalo.addExtraData('saveanyway', saveanyway)
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
for modus in [mediatype]:
buggalo.addExtraData('modus', modus)
# row_xbmc: 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6empty, 7movieid
imdbId = row_xbmc[0]
lastplayed_xbmc = row_xbmc[3]
playcount_xbmc = row_xbmc[4]
name = row_xbmc[5]
if modus == 'episode':
season = row_xbmc[1]
episode = row_xbmc[2]
count_return = list([0, 0])
self.database_copy()
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return count_return
if not saveanyway and playcount_xbmc == 0:
# playcount in xbmc-list is empty. Nothing to save
if utils.getSetting("debug") == 'true':
# utils.log(u'wl_update_%s: not watched in xbmc: tt%d, %s' % (modus, imdbId, name), xbmc.LOGDEBUG)
pass
return count_return
if modus == 'movie':
j = [ii for ii, x in enumerate(self.watchedmovielist_wl) if x[0] == imdbId]
if modus == 'episode':
j = [ii for ii, x in enumerate(self.watchedepisodelist_wl) if x[0] == imdbId and x[1] == season and x[2] == episode]
if len(j) > 0:
j = j[0] # there can only be one valid index j, since only one entry in wl per imdbId
# the movie is already in the watched-list
if modus == 'movie':
row_wl = self.watchedmovielist_wl[j]
else:
row_wl = self.watchedepisodelist_wl[j]
lastplayed_wl = row_wl[3]
playcount_wl = row_wl[4]
lastchange_wl = row_wl[6]
if not saveanyway:
# compare playcount and lastplayed
# check if an update of the wl database is necessary (xbmc watched status newer)
if lastchange_wl > lastplayed_xbmc:
return count_return# no update of WL-db. Return
if playcount_wl >= playcount_xbmc and lastplayed_wl >= lastplayed_xbmc:
if utils.getSetting("debug") == 'true':
# utils.log(u'wl_update_movie: wl database up-to-date for movie tt%d, %s' % (imdbId, moviename), xbmc.LOGDEBUG)
pass
return count_return
# check if the lastplayed-timestamp in xbmc is useful
if lastplayed_xbmc == 0:
lastplayed_new = lastplayed_wl
else:
lastplayed_new = lastplayed_xbmc
else:
lastplayed_new = lastplayed_xbmc
lastchange_new = int(time.time())
if modus == 'movie':
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'UPDATE movie_watched SET playCount = ?, lastplayed = ?, lastChange = ? WHERE idMovieImdb LIKE ?'
else: # mysql
sql = 'UPDATE movie_watched SET playCount = %s, lastplayed = %s, lastChange = FROM_UNIXTIME(%s) WHERE idMovieImdb LIKE %s'
values = list([playcount_xbmc, lastplayed_new, lastchange_new, imdbId])
else:
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'UPDATE episode_watched SET playCount = ?, lastPlayed = ?, lastChange = ? WHERE idShow LIKE ? AND season LIKE ? AND episode LIKE ?'
else: # mysql
sql = 'UPDATE episode_watched SET playCount = %s, lastPlayed = FROM_UNIXTIME(%s), lastChange = FROM_UNIXTIME(%s) WHERE idShow LIKE %s AND season LIKE %s AND episode LIKE %s'
values = list([playcount_xbmc, lastplayed_new, lastchange_new, imdbId, season, episode])
self.sqlcursor.execute(sql, values)
count_return[1] = 1
# update the local mirror variable of the wl database: # 0imdbnumber, season, episode, 3lastPlayed, 4playCount, 5title, 6lastChange
if modus == 'movie':
self.watchedmovielist_wl[j] = list([imdbId, 0, 0, lastplayed_new, playcount_xbmc, name, lastchange_new])
else:
self.watchedepisodelist_wl[j] = list([imdbId, season, episode, lastplayed_new, playcount_xbmc, name, lastchange_new])
if utils.getSetting("debug") == 'true':
utils.log(u'wl_update_%s: updated wl db for "%s" (tt%d). playcount: {%d -> %d}. lastplayed: {"%s" -> "%s"}. lastchange: "%s"' % (modus, name, imdbId, playcount_wl, playcount_xbmc, utils.TimeStamptosqlDateTime(lastplayed_wl), utils.TimeStamptosqlDateTime(lastplayed_new), utils.TimeStamptosqlDateTime(lastchange_new)))
if playcount_xbmc > 0:
utils.showNotification(utils.getString(32403), name)
else:
utils.showNotification(utils.getString(32405), name)
else:
# the movie is not in the watched-list -> insert the movie
# order: idMovieImdb,playCount,lastChange,lastPlayed,title
lastchange_new = int(time.time())
if modus == 'movie':
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'INSERT INTO movie_watched (idMovieImdb,playCount,lastChange,lastPlayed,title) VALUES (?, ?, ?, ?, ?)'
else: # mysql
sql = 'INSERT INTO movie_watched (idMovieImdb,playCount,lastChange,lastPlayed,title) VALUES (%s, %s, FROM_UNIXTIME(%s), FROM_UNIXTIME(%s), %s)'
values = list([imdbId, playcount_xbmc, lastchange_new, lastplayed_xbmc, name])
else:
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'INSERT INTO episode_watched (idShow,season,episode,playCount,lastChange,lastPlayed) VALUES (?, ?, ?, ?, ?, ?)'
else: # mysql
sql = 'INSERT INTO episode_watched (idShow,season,episode,playCount,lastChange,lastPlayed) VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s), FROM_UNIXTIME(%s))'
values = list([imdbId, season, episode, playcount_xbmc, lastchange_new, lastplayed_xbmc])
self.sqlcursor.execute(sql, values)
utils.log(u'wl_update_%s: new entry for wl database: "%s", lastChange="%s", lastPlayed="%s", playCount=%d' % (modus, name, utils.TimeStamptosqlDateTime(lastchange_new), utils.TimeStamptosqlDateTime(lastplayed_xbmc), playcount_xbmc))
count_return[0] = 1
# update the local mirror variable of the wl database
if modus == 'movie':
self.watchedmovielist_wl.append(list([imdbId, 0, 0, lastplayed_xbmc, playcount_xbmc, name, lastchange_new]))
else:
self.watchedepisodelist_wl.append(list([imdbId, season, episode, lastplayed_xbmc, playcount_xbmc, name, lastchange_new]))
if utils.getSetting("debug") == 'true':
if playcount_xbmc > 0:
utils.showNotification(utils.getString(32402), name)
else:
utils.showNotification(utils.getString(32405), name)
if commit:
self.sqlcon.commit()
return count_return | gpl-3.0 | -5,481,094,405,210,845,000 | 53.060086 | 337 | 0.529812 | false |
wadobo/timebank | tinymce/views.py | 1 | 4408 | # Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
import json
import logging
from django.core import urlresolvers
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext, loader
from django.utils.translation import ugettext as _
from tinymce.compressor import gzip_compressor
from tinymce.widgets import get_language_config
def textareas_js(request, name, lang=None):
"""
    Returns a HttpResponse whose content is a JavaScript file. The template
is loaded from 'tinymce/<name>_textareas.js' or
'<name>/tinymce_textareas.js'. Optionally, the lang argument sets the
content language.
"""
template_files = (
'tinymce/%s_textareas.js' % name,
'%s/tinymce_textareas.js' % name,
)
template = loader.select_template(template_files)
vars = get_language_config(lang)
vars['content_language'] = lang
context = RequestContext(request, vars)
return HttpResponse(template.render(context),
content_type="application/x-javascript")
def spell_check(request):
"""
Returns a HttpResponse that implements the TinyMCE spellchecker protocol.
"""
try:
import enchant
raw = request.raw_post_data
input = json.loads(raw)
id = input['id']
method = input['method']
params = input['params']
lang = params[0]
arg = params[1]
if not enchant.dict_exists(str(lang)):
raise RuntimeError("dictionary not found for language '%s'" % lang)
checker = enchant.Dict(str(lang))
if method == 'checkWords':
result = [word for word in arg if not checker.check(word)]
elif method == 'getSuggestions':
result = checker.suggest(arg)
else:
            raise RuntimeError("Unknown spellcheck method: '%s'" % method)
output = {
'id': id,
'result': result,
'error': None,
}
except Exception:
logging.exception("Error running spellchecker")
return HttpResponse(_("Error running spellchecker"))
return HttpResponse(json.dumps(output),
content_type='application/json')
def preview(request, name):
"""
Returns a HttpResponse whose content is an HTML file that is used
by the TinyMCE preview plugin. The template is loaded from
'tinymce/<name>_preview.html' or '<name>/tinymce_preview.html'.
"""
template_files = (
'tinymce/%s_preview.html' % name,
'%s/tinymce_preview.html' % name,
)
template = loader.select_template(template_files)
return HttpResponse(template.render(RequestContext(request)),
content_type="text/html")
def flatpages_link_list(request):
"""
    Returns a HttpResponse whose content is a JavaScript file representing a
list of links to flatpages.
"""
from django.contrib.flatpages.models import FlatPage
link_list = [(page.title, page.url) for page in FlatPage.objects.all()]
return render_to_link_list(link_list)
def compressor(request):
"""
Returns a GZip-compressed response.
"""
return gzip_compressor(request)
def render_to_link_list(link_list):
"""
    Returns a HttpResponse whose content is a JavaScript file representing a
    list of links suitable for use with the TinyMCE external_link_list_url
configuration option. The link_list parameter must be a list of 2-tuples.
"""
return render_to_js_vardef('tinyMCELinkList', link_list)
def render_to_image_list(image_list):
"""
    Returns a HttpResponse whose content is a JavaScript file representing a
    list of images suitable for use with the TinyMCE external_image_list_url
configuration option. The image_list parameter must be a list of 2-tuples.
"""
return render_to_js_vardef('tinyMCEImageList', image_list)
def render_to_js_vardef(var_name, var_value):
output = "var %s = %s" % (var_name, json.dumps(var_value))
return HttpResponse(output, content_type='application/x-javascript')
def filebrowser(request):
fb_url = "%s://%s%s" % (request.is_secure() and 'https' or 'http',
request.get_host(), urlresolvers.reverse('fb_browse'))
return render_to_response('tinymce/filebrowser.js', {'fb_url': fb_url},
context_instance=RequestContext(request))
| agpl-3.0 | 2,562,349,873,354,733,000 | 33.170543 | 79 | 0.667877 | false |
Gentux/etalage | docs/conf.py | 1 | 8704 | # -*- coding: utf-8 -*-
#
# Etalage documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 29 17:01:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Etalage'
copyright = u'2014, Romain Soufflet'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes']
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Etalagedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Etalage.tex', u'Etalage Documentation',
u'Romain Soufflet', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'etalage', u'Etalage Documentation',
[u'Romain Soufflet'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Etalage', u'Etalage Documentation',
u'Romain Soufflet', 'Etalage', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| agpl-3.0 | 101,110,575,252,617,650 | 31 | 82 | 0.707835 | false |
markokr/sysca | tests/helpers.py | 1 | 1213 |
import binascii
import os.path
from sysca import api as sysca
_FDIR = os.path.join(os.path.dirname(__file__), "files")
def demo_fn(basename):
return os.path.join(_FDIR, basename)
def demo_data(basename, mode="rb"):
if "b" in mode:
with open(demo_fn(basename), mode) as f:
return f.read().replace(b"\r\n", b"\n")
with open(demo_fn(basename), mode, encoding="utf8") as f:
return f.read().replace("\r\n", "\n")
def demo_raw(basename):
return depem(demo_data(basename))
def depem(data):
if isinstance(data, str):
data = data.encode("ascii")
p1 = data.find(b"-----\n") + 6
p2 = data.find(b"\n-----", p1)
return binascii.a2b_base64(data[p1:p2])
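# Example (hypothetical PEM body): depem("-----BEGIN X-----\nAAAA\n-----END X-----\n")
# strips the armor lines and base64-decodes the body, returning b"\x00\x00\x00".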
def new_root(ktype="ec", **kwargs):
ca_key = sysca.new_key(ktype)
ca_info = sysca.CertInfo(ca=True, load=ca_key, **kwargs)
ca_cert = sysca.create_x509_cert(ca_key, ca_key.public_key(), ca_info, ca_info, 365)
return ca_key, ca_cert
def new_cert(ca_key, ca_info, ktype="ec", **kwargs):
key = sysca.new_key(ktype)
info = sysca.CertInfo(load=key.public_key(), **kwargs)
cert = sysca.create_x509_cert(ca_key, key.public_key(), info, ca_info, 365)
return key, cert
| isc | -8,735,806,898,496,008,000 | 25.955556 | 88 | 0.619126 | false |
onitake/Uranium | examples/definition_viewer/main.py | 1 | 2450 | # Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import sys
import os.path
import signal
import traceback
from PyQt5.QtCore import QObject, QUrl, pyqtSlot, pyqtProperty, pyqtSignal
from PyQt5.QtQml import QQmlApplicationEngine, qmlRegisterType
from PyQt5.QtWidgets import QApplication
import UM.Resources
import UM.Settings
import DefinitionTreeModel
class DefinitionLoader(QObject):
def __init__(self, parent = None):
super().__init__(parent)
self._metadata = {}
self._definition_id = ""
@pyqtSlot("QUrl", result = str)
def load(self, file_path):
try:
definition = UM.Settings.DefinitionContainer(file_path.fileName())
dirname = os.path.dirname(file_path.toLocalFile())
UM.Resources.Resources.addSearchPath(dirname)
UM.Resources.Resources.addSearchPath(os.path.realpath(os.path.join(dirname, "..")))
with open(file_path.toLocalFile()) as data:
definition.deserialize(data.read())
self._metadata = dict(definition.metaData)
self.metaDataChanged.emit()
UM.Settings.ContainerRegistry.getInstance().addContainer(definition)
self._definition_id = definition.id
self.loaded.emit()
except Exception as e:
error_text = "An exception occurred loading file {0}:\n".format(file_path)
error_text += str(e)
error_text += traceback.format_exc()
self.error.emit(error_text)
loaded = pyqtSignal()
error = pyqtSignal(str, arguments=["errorText"])
metaDataChanged = pyqtSignal()
@pyqtProperty("QVariantMap", notify=metaDataChanged)
def metaData(self):
return self._metadata
@pyqtProperty(str, notify=loaded)
def definitionId(self):
return self._definition_id
signal.signal(signal.SIGINT, signal.SIG_DFL)
file_name = None
if len(sys.argv) > 1:
file_name = sys.argv[1]
del sys.argv[1]
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
qmlRegisterType(DefinitionLoader, "Example", 1, 0, "DefinitionLoader")
qmlRegisterType(DefinitionTreeModel.DefinitionTreeModel, "Example", 1, 0, "DefinitionTreeModel")
if file_name:
engine.rootContext().setContextProperty("open_file", QUrl.fromLocalFile(file_name))
engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))
app.exec_()
| agpl-3.0 | 3,755,217,824,218,569,000 | 30.410256 | 96 | 0.677959 | false |
yeon3683/handpose | util/handdetector.py | 1 | 18839 | """Provides a basic hand detector in depth images.
HandDetector provides an interface for detecting hands in depth images by using the center of mass.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <[email protected]>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import os
import cv2
from scipy import stats, ndimage
class HandDetector(object):
"""
Detect hand based on simple heuristic, centered at Center of Mass
"""
RESIZE_BILINEAR = 0
RESIZE_CV2_NN = 1
RESIZE_CV2_LINEAR = 2
def __init__(self, dpt, fx, fy, importer=None):
"""
Constructor
:param dpt: depth image
        :param fx: camera focal length
        :param fy: camera focal length
"""
self.dpt = dpt
self.maxDepth = min(1500, dpt.max())
self.minDepth = max(10, dpt.min())
# set values out of range to 0
self.dpt[self.dpt > self.maxDepth] = 0.
self.dpt[self.dpt < self.minDepth] = 0.
# camera settings
self.fx = fx
self.fy = fy
self.importer = importer
# depth resize method
self.resizeMethod = self.RESIZE_CV2_NN
def calculateCoM(self, dpt):
"""
Calculate the center of mass
:param dpt: depth image
:return: (x,y,z) center of mass
"""
dc = dpt.copy()
dc[dc < self.minDepth] = 0
dc[dc > self.maxDepth] = 0
cc = ndimage.measurements.center_of_mass(dc > 0)
num = numpy.count_nonzero(dc)
com = numpy.array((cc[1]*num, cc[0]*num, dc.sum()), numpy.float)
if num == 0:
return numpy.array((0, 0, 0), numpy.float)
else:
return com/num
def checkImage(self, tol):
"""
Check if there is some content in the image
:param tol: tolerance
:return:True if image is contentful, otherwise false
"""
# print numpy.std(self.dpt)
if numpy.std(self.dpt) < tol:
return False
else:
return True
def getNDValue(self):
"""
        Get the value used for not-defined depth distances
        :return: value of not defined depth
"""
if self.dpt[self.dpt < self.minDepth].shape[0] > self.dpt[self.dpt > self.maxDepth].shape[0]:
return stats.mode(self.dpt[self.dpt < self.minDepth])[0][0]
else:
return stats.mode(self.dpt[self.dpt > self.maxDepth])[0][0]
@staticmethod
def bilinearResize(src, dsize, ndValue):
"""
Bilinear resizing with sparing out not defined parts of the depth map
:param src: source depth map
:param dsize: new size of resized depth map
:param ndValue: value of not defined depth
:return:resized depth map
"""
dst = numpy.zeros((dsize[1], dsize[0]), dtype=numpy.float32)
x_ratio = float(src.shape[1] - 1) / dst.shape[1]
y_ratio = float(src.shape[0] - 1) / dst.shape[0]
for row in range(dst.shape[0]):
y = int(row * y_ratio)
y_diff = (row * y_ratio) - y # distance of the nearest pixel(y axis)
y_diff_2 = 1 - y_diff
for col in range(dst.shape[1]):
x = int(col * x_ratio)
x_diff = (col * x_ratio) - x # distance of the nearest pixel(x axis)
x_diff_2 = 1 - x_diff
y2_cross_x2 = y_diff_2 * x_diff_2
y2_cross_x = y_diff_2 * x_diff
y_cross_x2 = y_diff * x_diff_2
y_cross_x = y_diff * x_diff
# mathematically impossible, but just to be sure...
if(x+1 >= src.shape[1]) | (y+1 >= src.shape[0]):
raise UserWarning("Shape mismatch")
# set value to ND if there are more than two values ND
numND = int(src[y, x] == ndValue) + int(src[y, x + 1] == ndValue) + int(src[y + 1, x] == ndValue) + int(
src[y + 1, x + 1] == ndValue)
if numND > 2:
dst[row, col] = ndValue
continue
# print y2_cross_x2, y2_cross_x, y_cross_x2, y_cross_x
# interpolate only over known values, switch to linear interpolation
if src[y, x] == ndValue:
y2_cross_x2 = 0.
y2_cross_x = 1. - y_cross_x - y_cross_x2
if src[y, x + 1] == ndValue:
y2_cross_x = 0.
if y2_cross_x2 != 0.:
y2_cross_x2 = 1. - y_cross_x - y_cross_x2
if src[y + 1, x] == ndValue:
y_cross_x2 = 0.
y_cross_x = 1. - y2_cross_x - y2_cross_x2
if src[y + 1, x + 1] == ndValue:
y_cross_x = 0.
if y_cross_x2 != 0.:
y_cross_x2 = 1. - y2_cross_x - y2_cross_x2
# print src[y, x], src[y, x+1],src[y+1, x],src[y+1, x+1]
# normalize weights
if not ((y2_cross_x2 == 0.) & (y2_cross_x == 0.) & (y_cross_x2 == 0.) & (y_cross_x == 0.)):
sc = 1. / (y_cross_x + y_cross_x2 + y2_cross_x + y2_cross_x2)
y2_cross_x2 *= sc
y2_cross_x *= sc
y_cross_x2 *= sc
y_cross_x *= sc
# print y2_cross_x2, y2_cross_x, y_cross_x2, y_cross_x
if (y2_cross_x2 == 0.) & (y2_cross_x == 0.) & (y_cross_x2 == 0.) & (y_cross_x == 0.):
dst[row, col] = ndValue
else:
dst[row, col] = y2_cross_x2 * src[y, x] + y2_cross_x * src[y, x + 1] + y_cross_x2 * src[
y + 1, x] + y_cross_x * src[y + 1, x + 1]
return dst
def comToBounds(self, com, size):
"""
Calculate boundaries, project to 3D, then add offset and backproject to 2D (ux, uy are canceled)
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:return: xstart, xend, ystart, yend, zstart, zend
"""
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
xstart = int(numpy.floor((com[0] * com[2] / self.fx - size[0] / 2.) / com[2]*self.fx))
xend = int(numpy.floor((com[0] * com[2] / self.fx + size[0] / 2.) / com[2]*self.fx))
ystart = int(numpy.floor((com[1] * com[2] / self.fy - size[1] / 2.) / com[2]*self.fy))
yend = int(numpy.floor((com[1] * com[2] / self.fy + size[1] / 2.) / com[2]*self.fy))
return xstart, xend, ystart, yend, zstart, zend
def getCrop(self, dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z=True):
"""
Crop patch from image
:param dpt: depth image to crop from
:param xstart: start x
:param xend: end x
:param ystart: start y
:param yend: end y
:param zstart: start z
:param zend: end z
:param thresh_z: threshold z values
:return: cropped image
"""
if len(dpt.shape) == 2:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1])].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1]))), mode='constant', constant_values=0)
elif len(dpt.shape) == 3:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1]), :].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1])),
(0, 0)), mode='constant', constant_values=0)
else:
raise NotImplementedError()
if thresh_z is True:
msk1 = numpy.bitwise_and(cropped < zstart, cropped != 0)
msk2 = numpy.bitwise_and(cropped > zend, cropped != 0)
cropped[msk1] = zstart
cropped[msk2] = 0. # backface is at 0, it is set later
return cropped
def resizeCrop(self, crop, sz):
"""
Resize cropped image
:param crop: crop
:param sz: size
:return: resized image
"""
if self.resizeMethod == self.RESIZE_CV2_NN:
rz = cv2.resize(crop, sz, interpolation=cv2.INTER_NEAREST)
elif self.resizeMethod == self.RESIZE_BILINEAR:
rz = self.bilinearResize(crop, sz, self.getNDValue())
elif self.resizeMethod == self.RESIZE_CV2_LINEAR:
rz = cv2.resize(crop, sz, interpolation=cv2.INTER_LINEAR)
else:
raise NotImplementedError("Unknown resize method!")
return rz
def applyCrop3D(self, dpt, com, size, dsize, thresh_z=True, background=None):
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z)
wb = (xend - xstart)
hb = (yend - ystart)
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
# depth resize
rz = self.resizeCrop(cropped, sz)
if background is None:
background = self.getNDValue() # use background as filler
ret = numpy.ones(dsize, numpy.float32) * background
xstart = int(numpy.floor(dsize[0] / 2. - rz.shape[1] / 2.))
xend = int(xstart + rz.shape[1])
ystart = int(numpy.floor(dsize[1] / 2. - rz.shape[0] / 2.))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
return ret
def cropArea3D(self, com=None, size=(250, 250, 250), dsize=(128, 128), docom=False):
"""
        Crop area of hand in a 3D volume, scaled inversely with the distance of the hand to the camera
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:param dsize: (x,y) extent of the destination size
:return: cropped hand image, transformation matrix for joints, CoM in image coordinates
"""
# print com, self.importer.jointImgTo3D(com)
# import matplotlib.pyplot as plt
# import matplotlib
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.imshow(self.dpt, cmap=matplotlib.cm.jet)
if len(size) != 3 or len(dsize) != 2:
raise ValueError("Size must be 3D and dsize 2D bounding box")
if com is None:
com = self.calculateCoM(self.dpt)
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='.')
#############
# for simulating COM within cube
if docom is True:
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
if numpy.isclose(com[2], 0):
com[2] = 300.
com[0] += xstart
com[1] += ystart
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='x')
wb = (xend - xstart)
hb = (yend - ystart)
trans = numpy.asmatrix(numpy.eye(3, dtype=float))
trans[0, 2] = -xstart
trans[1, 2] = -ystart
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
# print com, sz, cropped.shape, xstart, xend, ystart, yend, hb, wb, zstart, zend
if cropped.shape[0] > cropped.shape[1]:
scale = numpy.asmatrix(numpy.eye(3, dtype=float) * sz[1] / float(cropped.shape[0]))
else:
scale = numpy.asmatrix(numpy.eye(3, dtype=float) * sz[0] / float(cropped.shape[1]))
scale[2, 2] = 1
# depth resize
rz = self.resizeCrop(cropped, sz)
# pylab.imshow(rz); pylab.gray();t=transformPoint2D(com,scale*trans);pylab.scatter(t[0],t[1]); pylab.show()
ret = numpy.ones(dsize, numpy.float32) * self.getNDValue() # use background as filler
xstart = int(numpy.floor(dsize[0] / 2. - rz.shape[1] / 2.))
xend = int(xstart + rz.shape[1])
ystart = int(numpy.floor(dsize[1] / 2. - rz.shape[0] / 2.))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
# print rz.shape
off = numpy.asmatrix(numpy.eye(3, dtype=float))
off[0, 2] = xstart
off[1, 2] = ystart
# fig = plt.figure()
# ax = fig.add_subplot(131)
# ax.imshow(cropped, cmap=matplotlib.cm.jet)
# ax = fig.add_subplot(132)
# ax.imshow(rz, cmap=matplotlib.cm.jet)
# ax = fig.add_subplot(133)
# ax.imshow(ret, cmap=matplotlib.cm.jet)
# plt.show(block=False)
# print trans,scale,off,off*scale*trans
return ret, off * scale * trans, com
def checkPose(self, joints):
"""
Check if pose is anatomically possible
@see Serre: Kinematic model of the hand using computer vision
:param joints: joint locations R^16x3
:return: true if pose is possible
"""
# check dip, pip of fingers
return True
def detect(self, size=(250, 250, 250), doHandSize=True):
"""
Detect the hand as closest object to camera
:param size: bounding box size
:return: center of mass of hand
"""
steps = 20
dz = (self.maxDepth - self.minDepth)/float(steps)
for i in range(steps):
part = self.dpt.copy()
part[part < i*dz + self.minDepth] = 0
part[part > (i+1)*dz + self.minDepth] = 0
part[part != 0] = 10 # set to something
ret, thresh = cv2.threshold(part, 1, 255, cv2.THRESH_BINARY)
thresh = thresh.astype(dtype=numpy.uint8)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in range(len(contours)):
if cv2.contourArea(contours[c]) > 200:
# centroid
M = cv2.moments(contours[c])
cx = int(numpy.rint(M['m10']/M['m00']))
cy = int(numpy.rint(M['m01']/M['m00']))
# crop
xstart = int(max(cx-100, 0))
xend = int(min(cx+100, self.dpt.shape[1]-1))
ystart = int(max(cy-100, 0))
yend = int(min(cy+100, self.dpt.shape[0]-1))
cropped = self.dpt[ystart:yend, xstart:xend].copy()
cropped[cropped < i*dz + self.minDepth] = 0.
cropped[cropped > (i+1)*dz + self.minDepth] = 0.
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
com[0] += xstart
com[1] += ystart
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
if doHandSize is True:
# refined contour for size estimation
part_ref = self.dpt.copy()
part_ref[part_ref < zstart] = 0
part_ref[part_ref > zend] = 0
part_ref[part_ref != 0] = 10 # set to something
ret, thresh_ref = cv2.threshold(part_ref, 1, 255, cv2.THRESH_BINARY)
contours_ref, _ = cv2.findContours(thresh_ref.astype(dtype=numpy.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# find the largest contour
areas = [cv2.contourArea(cc) for cc in contours_ref]
c_max = numpy.argmax(areas)
# final result
return com, self.estimateHandsize(contours_ref[c_max], com, size)
else:
return com, size
# no appropriate hand detected
return numpy.array((0, 0, 0), numpy.float), size
def estimateHandsize(self, contours, com, cube=(250, 250, 250), tol=0):
"""
Estimate hand size from depth image
:param contours: contours of hand
:param com: center of mass
:param cube: default cube
:param tol: tolerance to be added to all sides
:return: metric cube for cropping (x, y, z)
"""
x, y, w, h = cv2.boundingRect(contours)
# drawing = numpy.zeros((480, 640), dtype=float)
# cv2.drawContours(drawing, [contours], 0, (255, 0, 244), 1, 8)
# cv2.rectangle(drawing, (x, y), (x+w, y+h), (244, 0, 233), 2, 8, 0)
# cv2.imshow("contour", drawing)
# convert to cube
xstart = (com[0] - w / 2.) * com[2] / self.fx
xend = (com[0] + w / 2.) * com[2] / self.fx
ystart = (com[1] - h / 2.) * com[2] / self.fy
yend = (com[1] + h / 2.) * com[2] / self.fy
szx = xend - xstart
szy = yend - ystart
sz = (szx + szy) / 2.
cube = (sz + tol, sz + tol, sz + tol)
return cube
| gpl-3.0 | 4,676,184,767,098,701,000 | 39.86551 | 136 | 0.524019 | false |
ytsarev/rally | rally/deploy/serverprovider/providers/existing.py | 1 | 2084 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.deploy.serverprovider import provider
class ExistingServers(provider.ProviderFactory):
"""Just return endpoints from own configuration."""
CREDENTIALS_SCHEMA = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'port': {'type': 'integer'},
'user': {'type': 'string'},
'key': {'type': 'string'},
'password': {'type': 'string'}
},
'required': ['host', 'user']
}
CONFIG_SCHEMA = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'credentials': {
'type': 'array',
'items': CREDENTIALS_SCHEMA
},
},
'required': ['credentials']
}
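    # Illustrative example (not from the original code; hosts and credentials
    # below are made up) of a config dict that satisfies CONFIG_SCHEMA:
    #
    #   {
    #       'name': 'my-existing-servers',
    #       'credentials': [
    #           {'host': '10.0.0.5', 'user': 'root', 'key': '/path/to/id_rsa'},
    #           {'host': '10.0.0.6', 'user': 'root', 'password': 'secret',
    #            'port': 2222},
    #       ],
    #   }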
def __init__(self, deployment, config):
super(ExistingServers, self).__init__(deployment, config)
self.credentials = config['credentials']
def create_servers(self):
servers = []
for endpoint in self.credentials:
servers.append(provider.Server(host=endpoint['host'],
user=endpoint['user'],
key=endpoint.get('key'),
password=endpoint.get('password'),
port=endpoint.get('port', 22)))
return servers
def destroy_servers(self):
pass
| apache-2.0 | -183,121,135,223,786,270 | 32.612903 | 78 | 0.545106 | false |
odicraig/kodi2odi | addons/plugin.video.teevee/default.py | 1 | 19931 | from __future__ import unicode_literals
from resources.lib.modules.addon import Addon
import sys,os,re
import urlparse,urllib
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
from resources.lib.modules import control,client,teevee2,metadata,cache
from resources.lib.modules.log_utils import log
meta_enabled = control.setting('tv_metadata') == 'true'
paginated = control.setting('limit_shows') == 'true'
offset = int(control.setting('results_number'))
base = 'http://opentuner.is/'
addon = Addon('plugin.video.teevee', sys.argv)
addon_handle = int(sys.argv[1])
if not os.path.exists(control.dataPath):
os.mkdir(control.dataPath)
AddonPath = addon.get_path()
themes=['new','simple']
theme = themes[int(control.setting('theme'))]
IconPath = os.path.join(AddonPath , "resources/media/%s"%theme)
def icon_path(filename):
if 'http://' in filename:
return filename
return os.path.join(IconPath, filename)
fanart = icon_path('fanart.jpg')
args = urlparse.parse_qs(sys.argv[2][1:])
mode = args.get('mode', None)
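# Note (comment added for clarity; the example URL is made up): Kodi passes the
# plugin URL query string in sys.argv[2], e.g. '?mode=open_show&url=/show/x&title=Foo'.
# urlparse.parse_qs returns a dict of lists, which is why values are read below
# as args['...'][0].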
if mode is None:
addon.add_item({'mode': 'favourites'}, {'title':control.lang(30100).encode('utf-8')}, img=icon_path('Favourites.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'new_episodes', 'page':'1'}, {'title':control.lang(30101).encode('utf-8'), 'page':'1'}, img=icon_path('Latest_added.png'), fanart=fanart,is_folder=True)
if control.setting('enable_calendar')=='true':
addon.add_item({'mode': 'calendar'}, {'title':control.lang(30102).encode('utf-8')}, img=icon_path('Calendar.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'open_shows', 'url':'/latest-added/', 'page':'1'}, {'title':control.lang(30103).encode('utf-8')}, img=icon_path('Latest_added.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'open_shows', 'url':'/popular-today/', 'page':'1'}, {'title':control.lang(30104).encode('utf-8')}, img=icon_path('Popular.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'open_shows', 'url':'/most-popular/', 'page':'1'}, {'title':control.lang(30105).encode('utf-8')}, img=icon_path('Popular.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'alphabet'}, {'title':control.lang(30106).encode('utf-8')}, img=icon_path('AZ.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'genres'}, {'title':control.lang(30107).encode('utf-8')}, img=icon_path('Genre.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'downloader'}, {'title':control.lang(30108).encode('utf-8')}, img=icon_path('Downloads.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'search'}, {'title':control.lang(30109).encode('utf-8')}, img=icon_path('Search.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
from resources.lib.modules import cache, changelog
cache.get(changelog.get, 600000000, control.addonInfo('version'), table='changelog')
elif mode[0]=='favourites':
from resources.lib.modules import favourites
favs = favourites.get_favourites()
total=len(favs)
for fav in favs:
title,url,year = fav
url = base + url
meta = metadata.get_show_meta(title,url,year=year)
context = cache.get(teevee2.get_tv_context,10000,title,url,year,True)
addon.add_item({'mode': 'open_show', 'url': url,'title': title}, meta,img=meta['cover_url'], fanart=meta['backdrop_url'], total_items=total,contextmenu_items=context,is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_shows':
url = url_sh = args['url'][0]
page = int(args['page'][0])
try: sort = args['sort'][0] == 'true'
except: sort = False
shows = cache.get(teevee2.get_shows,24,url)
if sort:
shows.sort(key=lambda x: x[1])
last = False
if paginated and meta_enabled:
if len(shows)<=offset:
last=True
pass
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(shows):
last = True
                end = len(shows)  # include the final item on the last page
shows = shows[start:end]
total = len(shows)
for show in shows:
url,title,year = show
meta = metadata.get_show_meta(title,url,year=year)
context = teevee2.get_tv_context(title,url,year,False)
addon.add_item({'mode': 'open_show', 'url': url,'title': title}, meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total,is_folder=True)
if paginated and meta_enabled and not last:
addon.add_item({'mode': 'open_shows', 'url':url_sh, 'page':'%s'%(page+1)}, {'title':control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='new_episodes':
page = int(args['page'][0])
episodes = cache.get(teevee2.get_new_episodes,24)
last = False
if paginated and meta_enabled:
if len(episodes)<=offset:
last=True
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(episodes):
last = True
                end = len(episodes)  # include the final item on the last page
episodes = episodes[start:end]
total = len(episodes)
for ep in episodes:
url,showtitle,season,episode = ep
meta = metadata.get_episode_meta(showtitle,season,episode,url,more=True)
context = teevee2.get_episode_context(showtitle,season,episode,url,meta['cover_url'])
        addon.add_video_item({'mode': 'play_episode', 'url': url}, meta, img=meta['cover_url'], fanart=meta['backdrop_url'], contextmenu_items=context, total_items=total)
if paginated and meta_enabled and not last:
addon.add_item({'mode': 'new_episodes','page':'%s'%(page+1)}, {'title':control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='alphabet':
alphabet = teevee2.get_alphabet()
for al in alphabet:
addon.add_item({'mode': 'open_shows', 'url':al[0], 'page':'1','sort':'true'}, {'title':al[1]}, img=icon_path('AZ.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='genres':
alphabet = teevee2.get_genres()
for al in alphabet:
addon.add_item({'mode': 'open_shows', 'url':al[0], 'page':'1'}, {'title':al[1]}, img=icon_path('Genre.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_show':
url = args['url'][0]
show = args['title'][0]
imdb,seasons = teevee2.get_seasons(url)
meta = metadata.get_season_meta(show,len(seasons),imdb)
i = 0
for s in seasons:
addon.add_item({'mode': 'open_season', 'url':s[0], 'num':'%s'%(i+1)}, {'title':'%s %s'%(control.lang(30170).encode('utf-8'),s[1])}, img=meta[i]['cover_url'], fanart=meta[i]['backdrop_url'],is_folder=True)
i += 1
addon.end_of_directory()
elif mode[0]=='open_season':
url = args['url'][0]
num = args['num'][0]
imdb,showtitle,episodes = teevee2.get_episodes(url,num)
total = len(episodes)
for ep in episodes:
url,episode,episode_title = ep
meta = metadata.get_episode_meta(showtitle,num,episode,url,ep_title=episode_title)
if episode_title not in meta['title']:
meta['title'] = '%sx%s %s'%(num,episode,episode_title)
context = teevee2.get_episode_context(showtitle,num,episode,url,meta['cover_url'])
addon.add_video_item({'mode':'play_episode','url':url},meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total)
addon.end_of_directory()
elif mode[0]=='calendar':
days = teevee2.get_month()
for day in days:
d=day[1]
m=day[2]
y=day[3]
mnth=day[4]
name=day[0]+', %s %s '%(d,mnth)
addon.add_item({'mode': 'open_day', 'day':d, 'month':m, 'year':y,'page':'1'},{'title': name}, img=icon_path('Calendar.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_day':
day = args['day'][0]
month = args['month'][0]
year = args['year'][0]
page = int(args['page'][0])
episodes = cache.get(teevee2.get_episodes_calendar,100,day,month,year)
last = False
if paginated and meta_enabled:
if len(episodes)<=offset:
last = True
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(episodes):
last = True
                end = len(episodes)  # include the final item on the last page
episodes = episodes[start:end]
total = len(episodes)
for ep in episodes:
url,season,episode,showtitle,year = ep
meta = metadata.get_episode_meta(showtitle,season,episode,url,more=True)
context = teevee2.get_episode_context(showtitle,season,episode,url,meta['cover_url'])
addon.add_video_item({'mode':'play_episode','url':url},meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total)
if paginated and meta_enabled and not last:
        addon.add_item({'mode': 'open_day', 'day': day, 'month': month, 'year': year, 'page': '%s' % (page + 1)}, {'title': control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart, is_folder=True)
addon.end_of_directory()
elif mode[0]=='play_episode':
url = args['url'][0]
links,sl = teevee2.get_sources(url)
if control.setting('autoplay')!='true':
i = control.selectDialog(sl)
if i>-1:
try:
url = links[i]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
import urlresolver
resolved = urlresolver.resolve(url)
if control.setting('use_TM')=='true':
try:
from dudehere.routines.transmogrifier import TransmogrifierAPI
TM = TransmogrifierAPI()
resolved = TM.get_streaming_url(resolved)
except:
pass
addon.resolve_url(resolved)
except:
control.infoDialog(control.lang(30168).encode('utf-8'))
else:
index = 0
import urlresolver
done = False
checked = 0
        # keep trying candidate links until one resolves or every link was tried
        while not done:
            url = links[index % len(links)]
            if 'iwatch' in url:
                url = teevee2.resolve_iwatch(url)
            try:
                checked += 1
                import urlresolver
                resolved = urlresolver.resolve(url)
            except:
                index += 1
                if checked >= len(links):
                    resolved = False
                    break
                continue
            if not resolved:
                index += 1
                if checked >= len(links):
                    break
                continue
            else:
                break
        if resolved:
            if control.setting('use_TM') == 'true':
                try:
                    from dudehere.routines.transmogrifier import TransmogrifierAPI
                    TM = TransmogrifierAPI()
                    resolved = TM.get_streaming_url(resolved)
                except:
                    pass
            addon.resolve_url(resolved)
elif mode[0] == 'downloader':
import resources.lib.modules.downloader as downloader
downloader.downloader()
elif mode[0] == 'addDownload':
name,url,image=args['name'][0],args['url'][0],args['thumb'][0]
import resources.lib.modules.downloader as downloader
downloader.addDownload(name,url,image)
elif mode[0] == 'removeDownload':
url=args['url'][0]
import resources.lib.modules.downloader as downloader
downloader.removeDownload(url)
elif mode[0] == 'startDownload':
import resources.lib.modules.downloader as downloader
downloader.startDownload()
elif mode[0] == 'startDownloadThread':
import resources.lib.modules.downloader as downloader
downloader.startDownloadThread()
elif mode[0] == 'stopDownload':
import resources.lib.modules.downloader as downloader
downloader.stopDownload()
elif mode[0] == 'statusDownload':
import resources.lib.modules.downloader as downloader
downloader.statusDownload()
elif mode[0]=='download':
url = args['url'][0]
title = args['title'][0]
image = args['thumb'][0]
tm = control.setting('dl_TM') == 'true'
try:
from dudehere.routines.transmogrifier import TransmogrifierAPI
TM = TransmogrifierAPI()
except:
tm = False
links,sl = teevee2.get_sources(url)
if control.setting('auto_download')!='true':
i = control.selectDialog(sl)
if i>-1:
try:
url = links[i]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
import urlresolver
resolved = urlresolver.resolve(url)
if tm:
resolved = resolved.split('|')[0]
ext = os.path.splitext(urlparse.urlparse(resolved).path)[1][1:].lower()
if ext == 'm3u8': raise Exception()
filename = title.replace(' ','_')
filename = re.sub('[^-a-zA-Z0-9_.() ]+', '', filename)
filename = filename.rstrip('.')
try:
season = re.findall('S(\d+)',title)[0]
episode = re.findall('E(\d+)',title)[0]
except:
season,episode = '',''
video = {
"type": 'tvshow',
"filename": filename + '.' + ext,
"url": resolved,
"season": season,
"episode": episode,
"addon": "plugin.video.teevee",
"save_dir": control.setting('download_folder')
}
response = TM.enqueue([video])
else:
import resources.lib.modules.downloader as downloader
downloader.addDownload(title,resolved,image,resolved=True)
except:
control.infoDialog(control.lang(30168).encode('utf-8'))
else:
resolved = False
index = 0
import urlresolver
done = False
checked = 0
while not done:
url = links[index%len(links)]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
try:
checked +=1
import urlresolver
resolved=urlresolver.resolve(url)
            except:
                index += 1
                if checked >= len(links):
                    # every candidate link was tried and none resolved
                    resolved = False
                    break
                continue
            if not resolved:
                index += 1
                if checked >= len(links):
                    break
                continue
            else:
                break
        if resolved:
if tm:
resolved = resolved.split('|')[0]
ext = os.path.splitext(urlparse.urlparse(resolved).path)[1][1:].lower()
if ext == 'm3u8': raise Exception()
filename = title.replace(' ','_')
filename = re.sub('[^-a-zA-Z0-9_.() ]+', '', filename)
filename = filename.rstrip('.')
try:
season = re.findall('S(\d+)',title)[0]
episode = re.findall('E(\d+)',title)[0]
except:
season,episode = '',''
video = {
"type": 'tvshow',
"filename": filename + '.' + ext,
"url": resolved,
"season": season,
"episode": episode,
"addon": "plugin.video.teevee",
"save_dir": control.setting('download_folder')
}
response = TM.enqueue([video])
else:
import resources.lib.modules.downloader as downloader
downloader.addDownload(title,resolved,image,resolved=True)
elif mode[0]=='add_tv_fav':
name = args['show'][0]
link = args['link'][0]
year = args['year'][0]
from resources.lib.modules import favourites
favourites.add_favourite_show(name,link,year)
elif mode[0]=='rem_tv_fav':
title = args['show'][0]
link = args['link'][0]
from resources.lib.modules import favourites
favourites.remove_tv_fav(title,link)
xbmc.executebuiltin("Container.Refresh")
elif mode[0]=='del_tv_all':
confirm = control.yesnoDialog(control.lang(30169).encode('utf-8'),control.lang(30401).encode('utf-8'),'')
if confirm==1:
from resources.lib.modules import favourites
favourites.delete_all_tv_favs()
xbmc.executebuiltin("Container.Refresh")
control.infoDialog(control.lang(30402).encode('utf-8'))
elif mode[0]=='search':
addon.add_item({'mode': 'open_key_search'}, {'title':'[COLOR green]%s[/COLOR]'%control.lang(30404).encode('utf-8')}, img=icon_path('Search.png'), fanart=fanart,is_folder=True)
from resources.lib.modules import favourites
queries = favourites.get_search_history('tv')
del_url = addon.build_plugin_url({'mode': 'del_his_tv'})
context = [(control.lang(30143).encode('utf-8'),'RunPlugin(%s)'%del_url)]
for q in queries:
addon.add_item({'mode': 'open_search', 'q': q, 'page':'1'}, {'title':q}, img=icon_path('Search.png'), fanart=fanart,contextmenu_items=context, is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_key_search':
q = control.get_keyboard(control.lang(30403).encode('utf-8'))
if q:
from resources.lib.modules import favourites
url = addon.build_plugin_url({'mode':'open_search','q':q,'page':'1'})
favourites.add_search_query(q,'tv')
xbmc.executebuiltin("Container.Refresh")
import time
time.sleep(2)
control.execute('Container.Update(%s)'%url)
elif mode[0]=='open_search':
url = url_sh = args['q'][0]
page = int(args['page'][0])
shows = teevee2.search(url)
last = False
if paginated and meta_enabled:
if len(shows)<=offset:
last=True
pass
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(shows):
last = True
                end = len(shows)  # include the final item on the last page
shows = shows[start:end]
total = len(shows)
for show in shows:
url,title,year = show
meta = metadata.get_show_meta(title,url,year=year)
context = teevee2.get_tv_context(title,url,year,False)
addon.add_item({'mode': 'open_show', 'url': url,'title': title}, meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total,is_folder=True)
if paginated and meta_enabled and not last:
        addon.add_item({'mode': 'open_search', 'q': url_sh, 'page': '%s' % (page + 1)}, {'title': control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart, is_folder=True)
addon.end_of_directory()
elif mode[0]=='del_his_tv':
from resources.lib.modules import favourites
favourites.delete_history('tv')
xbmc.executebuiltin("Container.Refresh")
control.infoDialog(control.lang(30402).encode('utf-8'))
elif mode[0]=='clear_cache':
cache.clear() | gpl-3.0 | 4,522,614,937,469,297,000 | 37.315582 | 212 | 0.558928 | false |
ooici/coi-services | ion/core/function/transform_function.py | 1 | 2817 | #!/usr/bin/env python
'''
@author: Tim Giguere <[email protected]>
@description: New Implementation for TransformFunction classes
'''
from pyon.public import log
from pyon.core.exception import BadRequest
from interface.objects import Granule
class TransformFunction(object):
"""
The execute function receives an input and several configuration parameters, and returns the processed result.
@param input Any object, granule, or array of granules
@param context A dictionary
@param config A dictionary containing the container configuration. Will usually be empty.
@param params A dictionary containing input parameters
@param state A dictionary containing input state
"""
@staticmethod
def validate_inputs(f):
raise NotImplementedError('Method validate_inputs not implemented')
@staticmethod
def execute(input=None, context=None, config=None, params=None, state=None):
raise NotImplementedError('Method execute not implemented')
class SimpleTransformFunction(TransformFunction):
"""
This class takes in any single input object, performs a function on it, and then returns any output object.
"""
@staticmethod
def validate_inputs(target):
def validate(*args, **kwargs):
return target(*args, **kwargs)
return validate
@staticmethod
def execute(input=None, context=None, config=None, params=None, state=None):
pass
class SimpleGranuleTransformFunction(SimpleTransformFunction):
"""
This class receives a single granule, processes it, then returns a granule object.
"""
@staticmethod
def validate_inputs(target):
def validate(*args, **kwargs):
if args[0]:
if not isinstance(args[0], Granule):
raise BadRequest('input parameter must be of type Granule')
return target(*args, **kwargs)
return validate
@staticmethod
def execute(input=None, context=None, config=None, params=None, state=None):
pass
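# Illustrative sketch (added for clarity, not part of the original module): a
# concrete transform is expected to pair ``validate_inputs`` with ``execute`` so
# the input type is checked before processing. The class and body below are
# hypothetical and only show the intended calling pattern:
#
#     class PassThroughGranuleTransform(SimpleGranuleTransformFunction):
#         @staticmethod
#         @SimpleGranuleTransformFunction.validate_inputs
#         def execute(input=None, context=None, config=None, params=None, state=None):
#             # a real transform would build and return a new Granule here
#             return input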
class MultiGranuleTransformFunction(SimpleGranuleTransformFunction):
"""
This class receives multiple granules, processes them, then returns an array of granules.
"""
@staticmethod
def validate_inputs(target):
def validate(*args, **kwargs):
if args[0]:
if not isinstance(args[0], list):
raise BadRequest('input parameter must be of type List')
for x in args[0]:
if not isinstance(x, Granule):
raise BadRequest('input list may only contain Granules')
return target(*args, **kwargs)
return validate
@staticmethod
def execute(input=None, context=None, config=None, params=None, state=None):
pass | bsd-2-clause | -3,256,532,145,519,457,300 | 32.951807 | 114 | 0.672346 | false |
mandeepdhami/neutron | neutron/tests/unit/agent/test_securitygroups_rpc.py | 1 | 142446 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import mock
from oslo_config import cfg
import oslo_messaging
from testtools import matchers
import webob.exc
from neutron.agent import firewall as firewall_base
from neutron.agent.linux import iptables_manager
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as const
from neutron.common import ipv6_utils as ipv6
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.extensions import test_securitygroup as test_sg
FAKE_PREFIX = {const.IPv4: '10.0.0.0/24',
const.IPv6: '2001:db8::/64'}
FAKE_IP = {const.IPv4: '10.0.0.1',
const.IPv6: 'fe80::1',
'IPv6_GLOBAL': '2001:0db8::1',
'IPv6_LLA': 'fe80::123',
'IPv6_DHCP': '2001:db8::3'}
TEST_PLUGIN_CLASS = ('neutron.tests.unit.agent.test_securitygroups_rpc.'
'SecurityGroupRpcTestPlugin')
FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.'
FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver'
FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE +
'OVSHybridIptablesFirewallDriver')
FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'
def set_enable_security_groups(enabled):
cfg.CONF.set_override('enable_security_group', enabled,
group='SECURITYGROUP')
def set_firewall_driver(firewall_driver):
cfg.CONF.set_override('firewall_driver', firewall_driver,
group='SECURITYGROUP')
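# For example (comment added for clarity), a test case can force the
# iptables-based firewall with set_firewall_driver(FIREWALL_IPTABLES_DRIVER) or
# disable filtering entirely with set_firewall_driver(FIREWALL_NOOP_DRIVER);
# the FIREWALL_* constants above hold the dotted paths of those driver classes.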
class SecurityGroupRpcTestPlugin(test_sg.SecurityGroupTestPlugin,
sg_db_rpc.SecurityGroupServerRpcMixin):
def __init__(self):
super(SecurityGroupRpcTestPlugin, self).__init__()
self.notifier = mock.Mock()
self.devices = {}
def create_port(self, context, port):
result = super(SecurityGroupRpcTestPlugin,
self).create_port(context, port)
self.devices[result['id']] = result
self.notify_security_groups_member_updated(context, result)
return result
def update_port(self, context, id, port):
original_port = self.get_port(context, id)
updated_port = super(SecurityGroupRpcTestPlugin,
self).update_port(context, id, port)
self.devices[id] = updated_port
self.update_security_group_on_port(
context, id, port, original_port, updated_port)
def delete_port(self, context, id):
port = self.get_port(context, id)
super(SecurityGroupRpcTestPlugin, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
del self.devices[id]
def get_port_from_device(self, context, device):
device = self.devices.get(device)
if device:
device['security_group_rules'] = []
device['security_group_source_groups'] = []
device['fixed_ips'] = [ip['ip_address']
for ip in device['fixed_ips']]
return device
class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase):
def setUp(self, plugin=None):
plugin = plugin or TEST_PLUGIN_CLASS
set_firewall_driver(FIREWALL_NOOP_DRIVER)
super(SGServerRpcCallBackTestCase, self).setUp(plugin)
self.notifier = manager.NeutronManager.get_plugin().notifier
self.rpc = securitygroups_rpc.SecurityGroupServerRpcCallback()
def _test_security_group_port(self, device_owner, gw_ip,
cidr, ip_version, ip_address):
with self.network() as net:
with self.subnet(net,
gateway_ip=gw_ip,
cidr=cidr,
ip_version=ip_version) as subnet:
kwargs = {
'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
'ip_address': ip_address}]}
if device_owner:
kwargs['device_owner'] = device_owner
res = self._create_port(
self.fmt, net['network']['id'], **kwargs)
res = self.deserialize(self.fmt, res)
port_id = res['port']['id']
if device_owner == const.DEVICE_OWNER_ROUTER_INTF:
data = {'port': {'fixed_ips': []}}
req = self.new_update_request('ports', data, port_id)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self._delete('ports', port_id)
def test_notify_security_group_ipv6_gateway_port_added(self):
self._test_security_group_port(
const.DEVICE_OWNER_ROUTER_INTF,
'2001:0db8::1',
'2001:0db8::/64',
6,
'2001:0db8::1')
self.assertTrue(self.notifier.security_groups_provider_updated.called)
def test_notify_security_group_ipv6_normal_port_added(self):
self._test_security_group_port(
None,
'2001:0db8::1',
'2001:0db8::/64',
6,
'2001:0db8::3')
self.assertFalse(self.notifier.security_groups_provider_updated.called)
def test_notify_security_group_ipv4_dhcp_port_added(self):
self._test_security_group_port(
const.DEVICE_OWNER_DHCP,
'192.168.1.1',
'192.168.1.0/24',
4,
'192.168.1.2')
self.assertTrue(self.notifier.security_groups_provider_updated.called)
def test_notify_security_group_ipv4_gateway_port_added(self):
self._test_security_group_port(
const.DEVICE_OWNER_ROUTER_INTF,
'192.168.1.1',
'192.168.1.0/24',
4,
'192.168.1.1')
self.assertFalse(self.notifier.security_groups_provider_updated.called)
def test_notify_security_group_ipv4_normal_port_added(self):
self._test_security_group_port(
None,
'192.168.1.1',
'192.168.1.0/24',
4,
'192.168.1.3')
self.assertFalse(self.notifier.security_groups_provider_updated.called)
def test_security_group_rules_for_devices_ipv4_ingress(self):
fake_prefix = FAKE_PREFIX[const.IPv4]
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22')
rule2 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '23',
'23', fake_prefix)
rules = {
'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_max': 23, 'security_group_id': sg1_id,
'port_range_min': 23,
'source_ip_prefix': fake_prefix},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
@contextlib.contextmanager
def _port_with_addr_pairs_and_security_group(self):
plugin_obj = manager.NeutronManager.get_plugin()
if ('allowed-address-pairs'
not in plugin_obj.supported_extension_aliases):
self.skipTest("Test depends on allowed-address-pairs extension")
fake_prefix = FAKE_PREFIX['IPv4']
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', 'tcp', '22',
'22', remote_group_id=sg1_id)
rule2 = self._build_security_group_rule(
sg1_id,
'ingress', 'tcp', '23',
'23', fake_prefix)
rules = {
'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 201)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.1.0/24'},
{'mac_address': '00:00:00:00:00:01',
'ip_address': '11.0.0.1'}]
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
yield self.deserialize(self.fmt, res1)
def test_security_group_info_for_devices_ipv4_addr_pair(self):
with self._port_with_addr_pairs_and_security_group() as port:
port_id = port['port']['id']
sg_id = port['port']['security_groups'][0]
devices = [port_id, 'no_exist_device']
ctx = context.get_admin_context()
# verify that address pairs are included in remote SG IPs
sg_member_ips = self.rpc.security_group_info_for_devices(
ctx, devices=devices)['sg_member_ips']
expected_member_ips = [
'10.0.1.0/24', '11.0.0.1',
port['port']['fixed_ips'][0]['ip_address']]
self.assertEqual(sorted(expected_member_ips),
sorted(sg_member_ips[sg_id]['IPv4']))
self._delete('ports', port_id)
def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self):
fake_prefix = FAKE_PREFIX[const.IPv4]
with self._port_with_addr_pairs_and_security_group() as port:
port_id = port['port']['id']
sg_id = port['port']['security_groups'][0]
devices = [port_id, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id]
expected = [{'direction': 'egress', 'ethertype': 'IPv4',
'security_group_id': sg_id},
{'direction': 'egress', 'ethertype': 'IPv6',
'security_group_id': sg_id},
{'direction': 'ingress',
'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 22,
'remote_group_id': sg_id,
'security_group_id': sg_id,
'source_ip_prefix': '11.0.0.1/32',
'port_range_min': 22},
{'direction': 'ingress',
'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 22,
'remote_group_id': sg_id,
'security_group_id': sg_id,
'source_ip_prefix': '10.0.1.0/24',
'port_range_min': 22},
{'direction': 'ingress', 'protocol': 'tcp',
'ethertype': 'IPv4',
'port_range_max': 23, 'security_group_id': sg_id,
'port_range_min': 23,
'source_ip_prefix': fake_prefix},
]
expected = tools.UnorderedList(expected)
self.assertEqual(expected,
port_rpc['security_group_rules'])
self.assertEqual(port['port']['allowed_address_pairs'],
port_rpc['allowed_address_pairs'])
self._delete('ports', port_id)
def test_security_group_rules_for_devices_ipv4_egress(self):
fake_prefix = FAKE_PREFIX[const.IPv4]
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'egress', const.PROTO_NAME_TCP, '22',
'22')
rule2 = self._build_security_group_rule(
sg1_id,
'egress', const.PROTO_NAME_UDP, '23',
'23', fake_prefix)
rules = {
'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'egress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'egress',
'protocol': const.PROTO_NAME_UDP,
'ethertype': const.IPv4,
'port_range_max': 23, 'security_group_id': sg1_id,
'port_range_min': 23,
'dest_ip_prefix': fake_prefix},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
def test_security_group_rules_for_devices_ipv4_source_group(self):
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1,\
self.security_group() as sg2:
sg1_id = sg1['security_group']['id']
sg2_id = sg2['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '24',
'25', remote_group_id=sg2['security_group']['id'])
rules = {
'security_group_rules': [rule1['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id,
sg2_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
res2 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg2_id])
ports_rest2 = self.deserialize(self.fmt, res2)
port_id2 = ports_rest2['port']['id']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg2_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg2_id},
{'direction': u'ingress',
'source_ip_prefix': u'10.0.0.3/32',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_max': 25, 'port_range_min': 24,
'remote_group_id': sg2_id,
'security_group_id': sg1_id},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
self._delete('ports', port_id2)
def test_security_group_info_for_devices_ipv4_source_group(self):
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1,\
self.security_group() as sg2:
sg1_id = sg1['security_group']['id']
sg2_id = sg2['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '24',
'25', remote_group_id=sg2['security_group']['id'])
rules = {
'security_group_rules': [rule1['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
res2 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg2_id])
ports_rest2 = self.deserialize(self.fmt, res2)
port_id2 = ports_rest2['port']['id']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_info_for_devices(
ctx, devices=devices)
expected = {
'security_groups': {sg1_id: [
{'direction': 'egress', 'ethertype': const.IPv4},
{'direction': 'egress', 'ethertype': const.IPv6},
{'direction': u'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_max': 25, 'port_range_min': 24,
'remote_group_id': sg2_id}
]},
'sg_member_ips': {sg2_id: {
'IPv4': set([u'10.0.0.3']),
'IPv6': set(),
}}
}
self.assertEqual(expected['security_groups'],
ports_rpc['security_groups'])
self.assertEqual(expected['sg_member_ips'][sg2_id]['IPv4'],
ports_rpc['sg_member_ips'][sg2_id]['IPv4'])
self._delete('ports', port_id1)
self._delete('ports', port_id2)
def test_security_group_rules_for_devices_ipv6_ingress(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP[const.IPv6]
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rule2 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_UDP, '23',
'23', fake_prefix,
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
dhcp_port = self._create_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
'ip_address': FAKE_IP['IPv6_DHCP']}],
device_owner=const.DEVICE_OWNER_DHCP,
security_groups=[sg1_id])
dhcp_rest = self.deserialize(self.fmt, dhcp_port)
dhcp_mac = dhcp_rest['port']['mac_address']
dhcp_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
const.IPV6_LLA_PREFIX,
dhcp_mac))
res1 = self._create_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
source_port, dest_port, ethertype = sg_db_rpc.DHCP_RULE_PORT[6]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_UDP,
'ethertype': const.IPv6,
'port_range_max': 23,
'security_group_id': sg1_id,
'port_range_min': 23,
'source_ip_prefix': fake_prefix},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': fake_gateway,
'source_port_range_min': const.ICMPV6_TYPE_RA},
{'direction': 'ingress',
'ethertype': ethertype,
'port_range_max': dest_port,
'port_range_min': dest_port,
'protocol': const.PROTO_NAME_UDP,
'source_ip_prefix': dhcp_lla_ip,
'source_port_range_max': source_port,
'source_port_range_min': source_port}
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
def test_security_group_info_for_devices_only_ipv6_rule(self):
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22', remote_group_id=sg1_id,
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_info_for_devices(
ctx, devices=devices)
expected = {
'security_groups': {sg1_id: [
{'direction': 'egress', 'ethertype': const.IPv4},
{'direction': 'egress', 'ethertype': const.IPv6},
{'direction': u'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22, 'port_range_min': 22,
'remote_group_id': sg1_id}
]},
'sg_member_ips': {sg1_id: {
'IPv6': set(),
}}
}
self.assertEqual(expected['security_groups'],
ports_rpc['security_groups'])
self.assertEqual(expected['sg_member_ips'][sg1_id]['IPv6'],
ports_rpc['sg_member_ips'][sg1_id]['IPv6'])
self._delete('ports', port_id1)
def test_security_group_ra_rules_for_devices_ipv6_gateway_global(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP['IPv6_GLOBAL']
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6,
ipv6_ra_mode=const.IPV6_SLAAC
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
# Create gateway port
gateway_res = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
'ip_address': fake_gateway}],
device_owner='network:router_interface')
gateway_mac = gateway_res['port']['mac_address']
gateway_port_id = gateway_res['port']['id']
gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
const.IPV6_LLA_PREFIX,
gateway_mac))
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': gateway_lla_ip,
'source_port_range_min': const.ICMPV6_TYPE_RA},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
# Note(xuhanp): remove gateway port's fixed_ips or gateway port
# deletion will be prevented.
data = {'port': {'fixed_ips': []}}
req = self.new_update_request('ports', data, gateway_port_id)
self.deserialize(self.fmt, req.get_response(self.api))
self._delete('ports', gateway_port_id)
def test_security_group_rule_for_device_ipv6_multi_router_interfaces(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP['IPv6_GLOBAL']
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6,
ipv6_ra_mode=const.IPV6_SLAAC
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
# Create gateway port
gateway_res = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
'ip_address': fake_gateway}],
device_owner='network:router_interface')
gateway_mac = gateway_res['port']['mac_address']
gateway_port_id = gateway_res['port']['id']
gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
const.IPV6_LLA_PREFIX,
gateway_mac))
# Create another router interface port
interface_res = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
device_owner='network:router_interface')
interface_port_id = interface_res['port']['id']
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': gateway_lla_ip,
'source_port_range_min': const.ICMPV6_TYPE_RA},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
data = {'port': {'fixed_ips': []}}
req = self.new_update_request('ports', data, gateway_port_id)
self.deserialize(self.fmt, req.get_response(self.api))
req = self.new_update_request('ports', data, interface_port_id)
self.deserialize(self.fmt, req.get_response(self.api))
self._delete('ports', gateway_port_id)
self._delete('ports', interface_port_id)
def test_security_group_ra_rules_for_devices_ipv6_dvr(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP['IPv6_GLOBAL']
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6,
ipv6_ra_mode=const.IPV6_SLAAC
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
# Create DVR router interface port
gateway_res = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
'ip_address': fake_gateway}],
device_owner=const.DEVICE_OWNER_DVR_INTERFACE)
gateway_mac = gateway_res['port']['mac_address']
gateway_port_id = gateway_res['port']['id']
gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
const.IPV6_LLA_PREFIX,
gateway_mac))
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': gateway_lla_ip,
'source_port_range_min': const.ICMPV6_TYPE_RA},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
# Note(xuhanp): remove gateway port's fixed_ips or gateway port
# deletion will be prevented.
data = {'port': {'fixed_ips': []}}
req = self.new_update_request('ports', data, gateway_port_id)
self.deserialize(self.fmt, req.get_response(self.api))
self._delete('ports', gateway_port_id)
def test_security_group_ra_rules_for_devices_ipv6_gateway_lla(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP['IPv6_LLA']
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6,
ipv6_ra_mode=const.IPV6_SLAAC
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': fake_gateway,
'source_port_range_min': const.ICMPV6_TYPE_RA},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
def test_security_group_ra_rules_for_devices_ipv6_no_gateway_port(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
with self.network() as n,\
self.subnet(n, gateway_ip=None, cidr=fake_prefix,
ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
def test_security_group_rules_for_devices_ipv6_egress(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP[const.IPv6]
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'egress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rule2 = self._build_security_group_rule(
sg1_id,
'egress', const.PROTO_NAME_UDP, '23',
'23', fake_prefix,
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'egress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'egress',
'protocol': const.PROTO_NAME_UDP,
'ethertype': const.IPv6,
'port_range_max': 23,
'security_group_id': sg1_id,
'port_range_min': 23,
'dest_ip_prefix': fake_prefix},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': fake_gateway,
'source_port_range_min': const.ICMPV6_TYPE_RA},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
def test_security_group_rules_for_devices_ipv6_source_group(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP[const.IPv6]
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6
) as subnet_v6,\
self.security_group() as sg1,\
self.security_group() as sg2:
sg1_id = sg1['security_group']['id']
sg2_id = sg2['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '24',
'25',
ethertype=const.IPv6,
remote_group_id=sg2['security_group']['id'])
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
ports_rest1 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id,
sg2_id])
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ports_rest2 = self._make_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg2_id])
port_id2 = ports_rest2['port']['id']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg2_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg2_id},
{'direction': 'ingress',
'source_ip_prefix': '2001:db8::2/128',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 25, 'port_range_min': 24,
'remote_group_id': sg2_id,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP_V6,
'ethertype': const.IPv6,
'source_ip_prefix': fake_gateway,
'source_port_range_min': const.ICMPV6_TYPE_RA},
]
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
self._delete('ports', port_id2)
class SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase):
def test_init_firewall_with_none_driver(self):
set_enable_security_groups(False)
agent = sg_rpc.SecurityGroupAgentRpc(
context=None, plugin_rpc=mock.Mock())
self.assertEqual(agent.firewall.__class__.__name__,
'NoopFirewallDriver')
class BaseSecurityGroupAgentRpcTestCase(base.BaseTestCase):
def setUp(self, defer_refresh_firewall=False):
super(BaseSecurityGroupAgentRpcTestCase, self).setUp()
set_firewall_driver(FIREWALL_NOOP_DRIVER)
self.agent = sg_rpc.SecurityGroupAgentRpc(
context=None, plugin_rpc=mock.Mock(),
defer_refresh_firewall=defer_refresh_firewall)
mock.patch('neutron.agent.linux.iptables_manager').start()
self.default_firewall = self.agent.firewall
self.firewall = mock.Mock()
firewall_object = firewall_base.FirewallDriver()
self.firewall.defer_apply.side_effect = firewall_object.defer_apply
self.agent.firewall = self.firewall
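        # Minimal port dict in the shape the firewall driver expects: the
        # port belongs to fake_sgid1/fake_sgid2 and carries one rule that
        # references fake_sgid2 as a remote (source) group.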
self.fake_device = {'device': 'fake_device',
'network_id': 'fake_net',
'security_groups': ['fake_sgid1', 'fake_sgid2'],
'security_group_source_groups': ['fake_sgid2'],
'security_group_rules': [{'security_group_id':
'fake_sgid1',
'remote_group_id':
'fake_sgid2'}]}
self.firewall.ports = {'fake_device': self.fake_device}
class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase):
def setUp(self, defer_refresh_firewall=False):
super(SecurityGroupAgentRpcTestCase, self).setUp(
defer_refresh_firewall)
rpc = self.agent.plugin_rpc
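        # Raising UnsupportedVersion from security_group_info_for_devices
        # forces the agent to fall back to the older, rules-based
        # security_group_rules_for_devices RPC that this test case covers.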
rpc.security_group_info_for_devices.side_effect = (
oslo_messaging.UnsupportedVersion('1.2'))
rpc.security_group_rules_for_devices.return_value = (
self.firewall.ports)
def test_prepare_and_remove_devices_filter(self):
self.agent.prepare_devices_filter(['fake_device'])
self.agent.remove_devices_filter(['fake_device'])
        # Ignore devices that are not filtered.
self.firewall.assert_has_calls([mock.call.defer_apply(),
mock.call.prepare_port_filter(
self.fake_device),
mock.call.defer_apply(),
mock.call.remove_port_filter(
self.fake_device),
])
def test_prepare_devices_filter_with_noopfirewall(self):
self.agent.firewall = self.default_firewall
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.prepare_devices_filter(['fake_device'])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
def test_prepare_devices_filter_with_firewall_disabled(self):
cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.prepare_devices_filter(['fake_device'])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
def test_security_groups_rule_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall([self.fake_device['device']])])
def test_security_groups_rule_not_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
self.assertFalse(self.agent.refresh_firewall.called)
def test_security_groups_member_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall([self.fake_device['device']])])
def test_security_groups_member_not_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4'])
self.assertFalse(self.agent.refresh_firewall.called)
def test_security_groups_provider_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_provider_updated(None)
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall(None)])
def test_refresh_firewall(self):
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.refresh_firewall()
calls = [mock.call.defer_apply(),
mock.call.prepare_port_filter(self.fake_device),
mock.call.defer_apply(),
mock.call.update_port_filter(self.fake_device)]
self.firewall.assert_has_calls(calls)
def test_refresh_firewall_devices(self):
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.refresh_firewall([self.fake_device])
calls = [mock.call.defer_apply(),
mock.call.prepare_port_filter(self.fake_device),
mock.call.defer_apply(),
mock.call.update_port_filter(self.fake_device)]
self.firewall.assert_has_calls(calls)
def test_refresh_firewall_none(self):
self.agent.refresh_firewall([])
self.assertFalse(self.firewall.called)
def test_refresh_firewall_with_firewall_disabled(self):
cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.firewall.defer_apply = mock.Mock()
self.agent.refresh_firewall([self.fake_device])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
self.assertFalse(self.agent.firewall.defer_apply.called)
def test_refresh_firewall_with_noopfirewall(self):
self.agent.firewall = self.default_firewall
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.firewall.defer_apply = mock.Mock()
self.agent.refresh_firewall([self.fake_device])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
self.assertFalse(self.agent.firewall.defer_apply.called)
class SecurityGroupAgentEnhancedRpcTestCase(
BaseSecurityGroupAgentRpcTestCase):
def setUp(self, defer_refresh_firewall=False):
super(SecurityGroupAgentEnhancedRpcTestCase, self).setUp(
defer_refresh_firewall=defer_refresh_firewall)
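        # fake_sg_info mimics the payload of the enhanced
        # security_group_info_for_devices RPC: per-group rules plus the
        # member IPs fed to update_security_group_members(), with an
        # OrderedDict so the expected call order is deterministic.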
fake_sg_info = {
'security_groups': collections.OrderedDict([
('fake_sgid2', []),
('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])]),
'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}},
'devices': self.firewall.ports}
self.agent.plugin_rpc.security_group_info_for_devices.return_value = (
fake_sg_info)
def test_prepare_and_remove_devices_filter_enhanced_rpc(self):
self.agent.prepare_devices_filter(['fake_device'])
self.agent.remove_devices_filter(['fake_device'])
        # These two expected calls are too long to keep inline, so stash
        # them in tmp_mock variables.
tmp_mock1 = mock.call.update_security_group_rules(
'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])
tmp_mock2 = mock.call.update_security_group_members(
'fake_sgid2', {'IPv4': [], 'IPv6': []})
        # Ignore devices that are not filtered.
self.firewall.assert_has_calls([mock.call.defer_apply(),
mock.call.prepare_port_filter(
self.fake_device),
mock.call.update_security_group_rules(
'fake_sgid2', []),
tmp_mock1,
tmp_mock2,
mock.call.defer_apply(),
mock.call.remove_port_filter(
self.fake_device),
])
def test_security_groups_rule_updated_enhanced_rpc(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
self.agent.refresh_firewall.assert_called_once_with(
[self.fake_device['device']])
def test_security_groups_rule_not_updated_enhanced_rpc(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
self.assertFalse(self.agent.refresh_firewall.called)
def test_security_groups_member_updated_enhanced_rpc(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_member_updated(
['fake_sgid2', 'fake_sgid3'])
self.agent.refresh_firewall.assert_called_once_with(
[self.fake_device['device']])
def test_security_groups_member_not_updated_enhanced_rpc(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_member_updated(
['fake_sgid3', 'fake_sgid4'])
self.assertFalse(self.agent.refresh_firewall.called)
def test_security_groups_provider_updated_enhanced_rpc(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_provider_updated(None)
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall(None)])
def test_refresh_firewall_enhanced_rpc(self):
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.refresh_firewall()
calls = [mock.call.defer_apply(),
mock.call.prepare_port_filter(self.fake_device),
mock.call.update_security_group_rules('fake_sgid2', []),
mock.call.update_security_group_rules(
'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
mock.call.update_security_group_members(
'fake_sgid2', {'IPv4': [], 'IPv6': []}),
mock.call.defer_apply(),
mock.call.update_port_filter(self.fake_device),
mock.call.update_security_group_rules('fake_sgid2', []),
mock.call.update_security_group_rules(
'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
mock.call.update_security_group_members(
'fake_sgid2', {'IPv4': [], 'IPv6': []})]
self.firewall.assert_has_calls(calls)
def test_refresh_firewall_devices_enhanced_rpc(self):
self.agent.prepare_devices_filter(['fake_device'])
self.agent.refresh_firewall([self.fake_device])
calls = [mock.call.defer_apply(),
mock.call.prepare_port_filter(self.fake_device),
mock.call.update_security_group_rules('fake_sgid2', []),
mock.call.update_security_group_rules('fake_sgid1', [
{'remote_group_id': 'fake_sgid2'}]),
mock.call.update_security_group_members('fake_sgid2', {
'IPv4': [], 'IPv6': []
}),
mock.call.defer_apply(),
mock.call.update_port_filter(self.fake_device),
mock.call.update_security_group_rules('fake_sgid2', []),
mock.call.update_security_group_rules('fake_sgid1', [
{'remote_group_id': 'fake_sgid2'}]),
mock.call.update_security_group_members('fake_sgid2', {
'IPv4': [], 'IPv6': []})
]
self.firewall.assert_has_calls(calls)
def test_refresh_firewall_none_enhanced_rpc(self):
self.agent.refresh_firewall([])
self.assertFalse(self.firewall.called)
class SecurityGroupAgentRpcWithDeferredRefreshTestCase(
SecurityGroupAgentRpcTestCase):
def setUp(self):
super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp(
defer_refresh_firewall=True)
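    # Helper that temporarily registers an extra fake port on the mocked
    # firewall so the tests below can check exactly which devices end up
    # in devices_to_refilter.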
@contextlib.contextmanager
def add_fake_device(self, device, sec_groups, source_sec_groups=None):
fake_device = {'device': device,
'security_groups': sec_groups,
'security_group_source_groups': source_sec_groups or [],
'security_group_rules': [{'security_group_id':
'fake_sgid1',
'remote_group_id':
'fake_sgid2'}]}
self.firewall.ports[device] = fake_device
yield
del self.firewall.ports[device]
def test_security_groups_rule_updated(self):
self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
def test_multiple_security_groups_rule_updated_same_port(self):
with self.add_fake_device(device='fake_device_2',
sec_groups=['fake_sgidX']):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_rule_updated(['fake_sgid1'])
self.agent.security_groups_rule_updated(['fake_sgid2'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
def test_security_groups_rule_updated_multiple_ports(self):
with self.add_fake_device(device='fake_device_2',
sec_groups=['fake_sgid2']):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_rule_updated(['fake_sgid1',
'fake_sgid2'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
self.assertIn('fake_device_2', self.agent.devices_to_refilter)
def test_multiple_security_groups_rule_updated_multiple_ports(self):
with self.add_fake_device(device='fake_device_2',
sec_groups=['fake_sgid2']):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_rule_updated(['fake_sgid1'])
self.agent.security_groups_rule_updated(['fake_sgid2'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
self.assertIn('fake_device_2', self.agent.devices_to_refilter)
def test_security_groups_member_updated(self):
self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
def test_multiple_security_groups_member_updated_same_port(self):
with self.add_fake_device(device='fake_device_2',
sec_groups=['fake_sgid1', 'fake_sgid1B'],
source_sec_groups=['fake_sgidX']):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_member_updated(['fake_sgid1',
'fake_sgid3'])
self.agent.security_groups_member_updated(['fake_sgid2',
'fake_sgid3'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
def test_security_groups_member_updated_multiple_ports(self):
with self.add_fake_device(device='fake_device_2',
sec_groups=['fake_sgid1', 'fake_sgid1B'],
source_sec_groups=['fake_sgid2']):
self.agent.security_groups_member_updated(['fake_sgid2'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
self.assertIn('fake_device_2', self.agent.devices_to_refilter)
def test_multiple_security_groups_member_updated_multiple_ports(self):
with self.add_fake_device(device='fake_device_2',
sec_groups=['fake_sgid1', 'fake_sgid1B'],
source_sec_groups=['fake_sgid1B']):
self.agent.security_groups_member_updated(['fake_sgid1B'])
self.agent.security_groups_member_updated(['fake_sgid2'])
self.assertIn('fake_device', self.agent.devices_to_refilter)
self.assertIn('fake_device_2', self.agent.devices_to_refilter)
def test_security_groups_provider_updated(self):
self.agent.security_groups_provider_updated(None)
self.assertTrue(self.agent.global_refresh_firewall)
def test_security_groups_provider_updated_devices_specified(self):
self.agent.security_groups_provider_updated(
['fake_device_1', 'fake_device_2'])
self.assertFalse(self.agent.global_refresh_firewall)
self.assertIn('fake_device_1', self.agent.devices_to_refilter)
self.assertIn('fake_device_2', self.agent.devices_to_refilter)
def test_setup_port_filters_new_ports_only(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set()
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(['fake_new_device']), set())
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.prepare_devices_filter.assert_called_once_with(
set(['fake_new_device']))
self.assertFalse(self.agent.refresh_firewall.called)
def test_setup_port_filters_updated_ports_only(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set()
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(), set(['fake_updated_device']))
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_updated_device']))
self.assertFalse(self.agent.prepare_devices_filter.called)
def test_setup_port_filter_new_and_updated_ports(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set()
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(['fake_new_device']),
set(['fake_updated_device']))
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.prepare_devices_filter.assert_called_once_with(
set(['fake_new_device']))
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_updated_device']))
def test_setup_port_filters_sg_updates_only(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set(['fake_device'])
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(), set())
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_device']))
self.assertFalse(self.agent.prepare_devices_filter.called)
def test_setup_port_filters_sg_updates_and_new_ports(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set(['fake_device'])
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(['fake_new_device']), set())
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.prepare_devices_filter.assert_called_once_with(
set(['fake_new_device']))
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_device']))
def _test_prepare_devices_filter(self, devices):
# simulate an RPC arriving and calling _security_group_updated()
self.agent.devices_to_refilter |= set(['fake_new_device'])
def test_setup_port_filters_new_port_and_rpc(self):
        # Make sure that if an RPC arrives and adds a device to
        # devices_to_refilter while we are in setup_port_filters(),
        # the device is not cleared and will be processed later.
self.agent.prepare_devices_filter = self._test_prepare_devices_filter
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set(['new_device', 'fake_device'])
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(['new_device']), set())
self.assertEqual(self.agent.devices_to_refilter,
set(['fake_new_device']))
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_device']))
def test_setup_port_filters_sg_updates_and_updated_ports(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(
set(), set(['fake_device', 'fake_updated_device']))
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_device', 'fake_device_2', 'fake_updated_device']))
self.assertFalse(self.agent.prepare_devices_filter.called)
def test_setup_port_filters_all_updates(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(
set(['fake_new_device']),
set(['fake_device', 'fake_updated_device']))
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.prepare_devices_filter.assert_called_once_with(
set(['fake_new_device']))
self.agent.refresh_firewall.assert_called_once_with(
set(['fake_device', 'fake_device_2', 'fake_updated_device']))
def test_setup_port_filters_no_update(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set()
self.agent.global_refresh_firewall = False
self.agent.setup_port_filters(set(), set())
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.assertFalse(self.agent.refresh_firewall.called)
self.assertFalse(self.agent.prepare_devices_filter.called)
def test_setup_port_filters_with_global_refresh(self):
self.agent.prepare_devices_filter = mock.Mock()
self.agent.refresh_firewall = mock.Mock()
self.agent.devices_to_refilter = set()
self.agent.global_refresh_firewall = True
self.agent.setup_port_filters(set(), set())
self.assertFalse(self.agent.devices_to_refilter)
self.assertFalse(self.agent.global_refresh_firewall)
self.agent.refresh_firewall.assert_called_once_with()
self.assertFalse(self.agent.prepare_devices_filter.called)
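# FakeSGNotifierAPI is a minimal concrete notifier built on the agent RPC
# API mixin; the test case below patches its client's prepare()/cast() so
# the emitted casts can be asserted directly.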
class FakeSGNotifierAPI(sg_rpc.SecurityGroupAgentRpcApiMixin):
def __init__(self):
self.topic = 'fake'
target = oslo_messaging.Target(topic=self.topic, version='1.0')
self.client = n_rpc.get_client(target)
class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase):
def setUp(self):
super(SecurityGroupAgentRpcApiTestCase, self).setUp()
self.notifier = FakeSGNotifierAPI()
self.mock_prepare = mock.patch.object(self.notifier.client, 'prepare',
return_value=self.notifier.client).start()
self.mock_cast = mock.patch.object(self.notifier.client,
'cast').start()
def test_security_groups_provider_updated(self):
self.notifier.security_groups_provider_updated(None)
self.mock_cast.assert_has_calls(
[mock.call(None, 'security_groups_provider_updated',
devices_to_update=None)])
def test_security_groups_rule_updated(self):
self.notifier.security_groups_rule_updated(
None, security_groups=['fake_sgid'])
self.mock_cast.assert_has_calls(
[mock.call(None, 'security_groups_rule_updated',
security_groups=['fake_sgid'])])
def test_security_groups_member_updated(self):
self.notifier.security_groups_member_updated(
None, security_groups=['fake_sgid'])
self.mock_cast.assert_has_calls(
[mock.call(None, 'security_groups_member_updated',
security_groups=['fake_sgid'])])
def test_security_groups_rule_not_updated(self):
self.notifier.security_groups_rule_updated(
None, security_groups=[])
self.assertEqual(False, self.mock_cast.called)
def test_security_groups_member_not_updated(self):
self.notifier.security_groups_member_updated(
None, security_groups=[])
self.assertEqual(False, self.mock_cast.called)
# Note(nati): bn -> binary_name
#             id -> device_id
PHYSDEV_MOD = '-m physdev'
PHYSDEV_IS_BRIDGED = '--physdev-is-bridged'
IPTABLES_ARG = {'bn': iptables_manager.binary_name,
'physdev_mod': PHYSDEV_MOD,
'physdev_is_bridged': PHYSDEV_IS_BRIDGED}
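# The rule templates below are plain %-format strings: every %(bn)s,
# %(chains)s, %(port1)s, ... placeholder is substituted from IPTABLES_ARG.
# A minimal sketch of the same mechanism (the values here are illustrative
# only, not the real binary_name):
#
#     args = {'bn': 'neutron-example', 'chains': 'FORWARD|INPUT'}
#     ':%(bn)s-(%(chains)s) - [0:0]' % args
#     # -> ':neutron-example-(FORWARD|INPUT) - [0:0]'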
CHAINS_MANGLE = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|mark'
IPTABLES_ARG['chains'] = CHAINS_MANGLE
IPTABLES_MANGLE = """# Generated by iptables_manager
*mangle
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A PREROUTING -j %(bn)s-PREROUTING
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING
[0:0] -A %(bn)s-PREROUTING -j %(bn)s-mark
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat'
# These dicts use the same keys as devices2 and devices3 in
# TestSecurityGroupAgentWithIptables() to ensure that the ordering
# is consistent regardless of the hash seed value.
PORTS = {'tap_port1': 'port1', 'tap_port2': 'port2'}
MACS = {'tap_port1': '12:34:56:78:9A:BC', 'tap_port2': '12:34:56:78:9A:BD'}
IPS = {'tap_port1': '10.0.0.3/32', 'tap_port2': '10.0.0.4/32'}
ports_values = list(PORTS.values())
macs_values = list(MACS.values())
ips_values = list(IPS.values())
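# Fold the per-port names, MACs and IPs into IPTABLES_ARG so the two-port
# templates can reference %(port1)s, %(mac1)s, %(ip1)s and friends directly.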
IPTABLES_ARG['port1'] = ports_values[0]
IPTABLES_ARG['port2'] = ports_values[1]
IPTABLES_ARG['mac1'] = macs_values[0]
IPTABLES_ARG['mac2'] = macs_values[1]
IPTABLES_ARG['ip1'] = ips_values[0]
IPTABLES_ARG['ip2'] = ips_values[1]
IPTABLES_ARG['chains'] = CHAINS_NAT
IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager
*raw
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
[0:0] -A PREROUTING -j %(bn)s-PREROUTING
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_RAW_DEVICE_1 = """# Generated by iptables_manager
*raw
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
[0:0] -A PREROUTING -j %(bn)s-PREROUTING
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_port1 -j CT --zone 1
[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_port1 -j CT --zone 1
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager
*raw
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
[0:0] -A PREROUTING -j %(bn)s-PREROUTING
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_%(port1)s \
-j CT --zone 1
[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port1)s -j CT --zone 1
[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_%(port2)s \
-j CT --zone 1
[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port2)s -j CT --zone 1
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_NAT = """# Generated by iptables_manager
*nat
:neutron-postrouting-bottom - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A PREROUTING -j %(bn)s-PREROUTING
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING
[0:0] -A POSTROUTING -j neutron-postrouting-bottom
[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat
[0:0] -A %(bn)s-snat -j %(bn)s-float-snat
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
CHAINS_RAW = 'OUTPUT|PREROUTING'
IPTABLES_ARG['chains'] = CHAINS_RAW
IPTABLES_RAW = """# Generated by iptables_manager
*raw
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A PREROUTING -j %(bn)s-PREROUTING
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback'
CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1'
CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2'
IPTABLES_ARG['chains'] = CHAINS_1
IPSET_FILTER_1 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_port1 -s 10.0.0.2/32 -p udp -m udp --sport 67 --dport 68 \
-j RETURN
[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_port1 -m set --match-set NIPv4security_group1 src -j \
RETURN
[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-s_port1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
[0:0] -A %(bn)s-s_port1 -j DROP
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-s_port1
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_port1 -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_FILTER_1 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_port1 -s 10.0.0.2/32 -p udp -m udp --sport 67 --dport 68 \
-j RETURN
[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-s_port1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
[0:0] -A %(bn)s-s_port1 -j DROP
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-s_port1
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_port1 -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_FILTER_1_2 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_port1 -s 10.0.0.2/32 -p udp -m udp --sport 67 --dport 68 \
-j RETURN
[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_port1 -s 10.0.0.4/32 -j RETURN
[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-s_port1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
[0:0] -A %(bn)s-s_port1 -j DROP
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-s_port1
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_port1 -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_2
IPSET_FILTER_2 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
[0:0] -A %(bn)s-i_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NIPv4security_group1 src -j \
RETURN
[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-s_%(port1)s -s %(ip1)s -m mac --mac-source %(mac1)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port1)s -j DROP
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-s_%(port1)s
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
[0:0] -A %(bn)s-i_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NIPv4security_group1 src -j \
RETURN
[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-s_%(port2)s -s %(ip2)s -m mac --mac-source %(mac2)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port2)s -j DROP
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-s_%(port2)s
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPSET_FILTER_2_3 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
[0:0] -A %(bn)s-i_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NIPv4security_group1 src -j \
RETURN
[0:0] -A %(bn)s-i_%(port1)s -p icmp -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-s_%(port1)s -s %(ip1)s -m mac --mac-source %(mac1)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port1)s -j DROP
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-s_%(port1)s
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
[0:0] -A %(bn)s-i_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NIPv4security_group1 src -j \
RETURN
[0:0] -A %(bn)s-i_%(port2)s -p icmp -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-s_%(port2)s -s %(ip2)s -m mac --mac-source %(mac2)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port2)s -j DROP
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-s_%(port2)s
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_FILTER_2 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
[0:0] -A %(bn)s-i_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s %(ip2)s -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-s_%(port1)s -s %(ip1)s -m mac --mac-source %(mac1)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port1)s -j DROP
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-s_%(port1)s
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
[0:0] -A %(bn)s-i_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s %(ip1)s -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-s_%(port2)s -s %(ip2)s -m mac --mac-source %(mac2)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port2)s -j DROP
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-s_%(port2)s
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# These dicts use the same keys as devices2 and devices3 in
# TestSecurityGroupAgentWithIptables() to ensure that the ordering
# is consistent regardless of the hash seed value.
REVERSE_PORT_ORDER = {'tap_port1': False, 'tap_port2': True}
reverse_port_order_values = list(REVERSE_PORT_ORDER.values())
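# Depending on REVERSE_PORT_ORDER, the single remote-group "-s <ip> -j
# RETURN" rule is appended either to port1's or port2's ingress chain while
# IPTABLES_FILTER_2_2 is assembled below.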
IPTABLES_FILTER_2_2 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
[0:0] -A %(bn)s-i_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN
""" % IPTABLES_ARG
if reverse_port_order_values[0]:
IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port1)s -s %(ip2)s "
"-j RETURN\n"
% IPTABLES_ARG)
IPTABLES_FILTER_2_2 += """[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-s_%(port1)s -s %(ip1)s -m mac --mac-source %(mac1)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port1)s -j DROP
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-s_%(port1)s
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
[0:0] -A %(bn)s-i_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN
""" % IPTABLES_ARG
if not reverse_port_order_values[0]:
IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port2)s -s %(ip1)s "
"-j RETURN\n"
% IPTABLES_ARG)
IPTABLES_FILTER_2_2 += """[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-s_%(port2)s -s %(ip2)s -m mac --mac-source %(mac2)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port2)s -j DROP
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-s_%(port2)s
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_FILTER_2_3 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
[0:0] -A %(bn)s-i_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -s %(ip2)s -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p icmp -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-s_%(port1)s -s %(ip1)s -m mac --mac-source %(mac1)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port1)s -j DROP
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-s_%(port1)s
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
[0:0] -A %(bn)s-i_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -s %(ip1)s -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p icmp -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-s_%(port2)s -s %(ip2)s -m mac --mac-source %(mac2)s \
-j RETURN
[0:0] -A %(bn)s-s_%(port2)s -j DROP
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 68 --dport 67 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-s_%(port2)s
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 67 --dport 68 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_EMPTY
IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_1
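# The IPv6 fixtures that follow allow the usual MLD and neighbour discovery
# ICMPv6 types (130-132, 135, 136) in, drop router advertisements
# (ICMPv6 type 134) on egress, and allow DHCPv6 client traffic
# (udp 546 -> 547) out while dropping the reverse (547 -> 546).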
IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 130 -j RETURN
[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 131 -j RETURN
[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 132 -j RETURN
[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 135 -j RETURN
[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 136 -j RETURN
[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
[0:0] -A %(bn)s-o_port1 -p icmpv6 --icmpv6-type 134 -j DROP
[0:0] -A %(bn)s-o_port1 -p icmpv6 -j RETURN
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 546 --dport 547 -j RETURN
[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 547 --dport 546 -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_2
IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
[0:0] -A %(bn)s-i_%(port1)s -p icmpv6 --icmpv6-type 130 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p icmpv6 --icmpv6-type 131 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p icmpv6 --icmpv6-type 132 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p icmpv6 --icmpv6-type 135 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -p icmpv6 --icmpv6-type 136 -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
[0:0] -A %(bn)s-o_%(port1)s -p icmpv6 --icmpv6-type 134 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -p icmpv6 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 546 --dport 547 -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -p udp -m udp --sport 547 --dport 546 -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port1)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port1)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
[0:0] -A %(bn)s-i_%(port2)s -p icmpv6 --icmpv6-type 130 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p icmpv6 --icmpv6-type 131 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p icmpv6 --icmpv6-type 132 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p icmpv6 --icmpv6-type 135 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -p icmpv6 --icmpv6-type 136 -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-i_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
[0:0] -A %(bn)s-o_%(port2)s -p icmpv6 --icmpv6-type 134 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -p icmpv6 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 546 --dport 547 -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -p udp -m udp --sport 547 --dport 546 -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state INVALID -j DROP
[0:0] -A %(bn)s-o_%(port2)s -m state --state RELATED,ESTABLISHED -j RETURN
[0:0] -A %(bn)s-o_%(port2)s -j %(bn)s-sg-fallback
[0:0] -A %(bn)s-sg-chain -j ACCEPT
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_EMPTY
IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager
*filter
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
[0:0] -A FORWARD -j neutron-filter-top
[0:0] -A OUTPUT -j neutron-filter-top
[0:0] -A neutron-filter-top -j %(bn)s-local
[0:0] -A INPUT -j %(bn)s-INPUT
[0:0] -A OUTPUT -j %(bn)s-OUTPUT
[0:0] -A FORWARD -j %(bn)s-FORWARD
[0:0] -A %(bn)s-sg-fallback -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
class TestSecurityGroupAgentWithIptables(base.BaseTestCase):
FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER
PHYSDEV_INGRESS = 'physdev-out'
PHYSDEV_EGRESS = 'physdev-in'
def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
super(TestSecurityGroupAgentWithIptables, self).setUp()
set_firewall_driver(self.FIREWALL_DRIVER)
cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP')
cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT')
self.utils_exec = mock.patch(
'neutron.agent.linux.utils.execute').start()
self.rpc = mock.Mock()
self._init_agent(defer_refresh_firewall)
if test_rpc_v1_1:
self.rpc.security_group_info_for_devices.side_effect = (
oslo_messaging.UnsupportedVersion('1.2'))
self.iptables = self.agent.firewall.iptables
# TODO(jlibosva) Get rid of mocking iptables execute and mock out
# firewall instead
self.iptables.use_ipv6 = True
self.iptables_execute = mock.patch.object(self.iptables,
"execute").start()
self.iptables_execute_return_values = []
self.expected_call_count = 0
self.expected_calls = []
self.expected_process_inputs = []
self.iptables_execute.side_effect = self.iptables_execute_return_values
rule1 = [{'direction': 'ingress',
'protocol': const.PROTO_NAME_UDP,
'ethertype': const.IPv4,
'source_ip_prefix': '10.0.0.2/32',
'source_port_range_min': 67,
'source_port_range_max': 67,
'port_range_min': 68,
'port_range_max': 68},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_min': 22,
'port_range_max': 22},
{'direction': 'egress',
'ethertype': const.IPv4}]
rule2 = rule1[:]
rule2 += [{'direction': 'ingress',
'source_ip_prefix': '10.0.0.4/32',
'ethertype': const.IPv4}]
rule3 = rule2[:]
rule3 += [{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP,
'ethertype': const.IPv4}]
rule4 = rule1[:]
rule4 += [{'direction': 'ingress',
'source_ip_prefix': '10.0.0.3/32',
'ethertype': const.IPv4}]
rule5 = rule4[:]
rule5 += [{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP,
'ethertype': const.IPv4}]
self.devices1 = {'tap_port1': self._device('tap_port1',
'10.0.0.3/32',
'12:34:56:78:9a:bc',
rule1)}
self.devices2 = {'tap_port1': self._device('tap_port1',
'10.0.0.3/32',
'12:34:56:78:9a:bc',
rule2),
'tap_port2': self._device('tap_port2',
'10.0.0.4/32',
'12:34:56:78:9a:bd',
rule4)}
self.devices3 = {'tap_port1': self._device('tap_port1',
'10.0.0.3/32',
'12:34:56:78:9a:bc',
rule3),
'tap_port2': self._device('tap_port2',
'10.0.0.4/32',
'12:34:56:78:9a:bd',
rule5)}
def _init_agent(self, defer_refresh_firewall):
self.agent = sg_rpc.SecurityGroupAgentRpc(
context=None, plugin_rpc=self.rpc,
defer_refresh_firewall=defer_refresh_firewall)
def _device(self, device, ip, mac_address, rule):
return {'device': device,
'network_id': 'fakenet',
'fixed_ips': [ip],
'mac_address': mac_address,
'security_groups': ['security_group1'],
'security_group_rules': rule,
'security_group_source_groups': [
'security_group1']}
def _regex(self, value):
value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS)
value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS)
value = value.replace('\n', '\\n')
value = value.replace('[', '\[')
value = value.replace(']', '\]')
value = value.replace('*', '\*')
return value
def _register_mock_call(self, *args, **kwargs):
return_value = kwargs.pop('return_value', None)
self.iptables_execute_return_values.append(return_value)
has_process_input = 'process_input' in kwargs
process_input = kwargs.get('process_input')
self.expected_process_inputs.append((has_process_input, process_input))
if has_process_input:
kwargs['process_input'] = mock.ANY
self.expected_calls.append(mock.call(*args, **kwargs))
self.expected_call_count += 1
def _verify_mock_calls(self):
self.assertEqual(self.expected_call_count,
self.iptables_execute.call_count)
self.iptables_execute.assert_has_calls(self.expected_calls)
for i, expected in enumerate(self.expected_process_inputs):
check, expected_regex = expected
if not check:
continue
# The second and later arguments of self.iptables.execute are keyword
# arguments, so the keyword dict is extracted with [1]
kwargs = self.iptables_execute.call_args_list[i][1]
self.assertThat(kwargs['process_input'],
matchers.MatchesRegex(expected_regex))
expected = ['net.bridge.bridge-nf-call-arptables=1',
'net.bridge.bridge-nf-call-ip6tables=1',
'net.bridge.bridge-nf-call-iptables=1']
for e in expected:
self.utils_exec.assert_any_call(['sysctl', '-w', e],
run_as_root=True)
def _replay_iptables(self, v4_filter, v6_filter, raw):
self._register_mock_call(
['iptables-save', '-c'],
run_as_root=True,
return_value='')
self._register_mock_call(
['iptables-restore', '-c'],
process_input=self._regex(raw + IPTABLES_NAT +
IPTABLES_MANGLE + v4_filter),
run_as_root=True,
return_value='')
self._register_mock_call(
['ip6tables-save', '-c'],
run_as_root=True,
return_value='')
self._register_mock_call(
['ip6tables-restore', '-c'],
process_input=self._regex(raw + v6_filter),
run_as_root=True,
return_value='')
def test_prepare_remove_port(self):
self.rpc.security_group_rules_for_devices.return_value = self.devices1
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_member_updated(self):
self.rpc.security_group_rules_for_devices.return_value = self.devices1
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.rpc.security_group_rules_for_devices.return_value = self.devices2
self.agent.security_groups_member_updated(['security_group1'])
self.agent.prepare_devices_filter(['tap_port2'])
self.rpc.security_group_rules_for_devices.return_value = self.devices1
self.agent.security_groups_member_updated(['security_group1'])
self.agent.remove_devices_filter(['tap_port2'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_rule_updated(self):
self.rpc.security_group_rules_for_devices.return_value = self.devices2
self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
self.rpc.security_group_rules_for_devices.return_value = self.devices3
self.agent.security_groups_rule_updated(['security_group1'])
self._verify_mock_calls()
class TestSecurityGroupAgentEnhancedRpcWithIptables(
TestSecurityGroupAgentWithIptables):
def setUp(self, defer_refresh_firewall=False):
super(TestSecurityGroupAgentEnhancedRpcWithIptables, self).setUp(
defer_refresh_firewall=defer_refresh_firewall, test_rpc_v1_1=False)
self.sg_info = self.rpc.security_group_info_for_devices
rule1 = [{'direction': 'ingress',
'protocol': const.PROTO_NAME_UDP,
'ethertype': const.IPv4,
'source_ip_prefix': '10.0.0.2/32',
'source_port_range_min': 67,
'source_port_range_max': 67,
'port_range_min': 68,
'port_range_max': 68},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv4,
'port_range_min': 22,
'port_range_max': 22},
{'direction': 'egress',
'ethertype': const.IPv4},
{'direction': 'ingress',
'remote_group_id': 'security_group1',
'ethertype': const.IPv4}]
rule2 = rule1[:]
rule2 += [{'direction': 'ingress',
'protocol': const.PROTO_NAME_ICMP,
'ethertype': const.IPv4}]
devices_info1 = {'tap_port1': self._device('tap_port1',
'10.0.0.3/32',
'12:34:56:78:9a:bc',
[])}
self.devices_info1 = {'security_groups': {'security_group1': rule1},
'sg_member_ips': {
'security_group1': {
'IPv4': ['10.0.0.3/32'], 'IPv6': []}},
'devices': devices_info1}
devices_info2 = {'tap_port1': self._device('tap_port1',
'10.0.0.3/32',
'12:34:56:78:9a:bc',
[]),
'tap_port2': self._device('tap_port2',
'10.0.0.4/32',
'12:34:56:78:9a:bd',
[])}
self.devices_info2 = {'security_groups': {'security_group1': rule1},
'sg_member_ips': {
'security_group1': {
'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
'IPv6': []}},
'devices': devices_info2}
self.devices_info3 = {'security_groups': {'security_group1': rule2},
'sg_member_ips': {
'security_group1': {
'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
'IPv6': []}},
'devices': devices_info2}
def test_prepare_remove_port(self):
self.sg_info.return_value = self.devices_info1
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_member_updated(self):
self.sg_info.return_value = self.devices_info1
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.sg_info.return_value = self.devices_info2
self.agent.security_groups_member_updated(['security_group1'])
self.agent.prepare_devices_filter(['tap_port2'])
self.sg_info.return_value = self.devices_info1
self.agent.security_groups_member_updated(['security_group1'])
self.agent.remove_devices_filter(['tap_port2'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_rule_updated(self):
self.sg_info.return_value = self.devices_info2
self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
self.sg_info.return_value = self.devices_info3
self.agent.security_groups_rule_updated(['security_group1'])
self._verify_mock_calls()
class TestSecurityGroupAgentEnhancedIpsetWithIptables(
TestSecurityGroupAgentEnhancedRpcWithIptables):
def setUp(self, defer_refresh_firewall=False):
super(TestSecurityGroupAgentEnhancedIpsetWithIptables, self).setUp(
defer_refresh_firewall)
self.agent.firewall.enable_ipset = True
self.ipset = self.agent.firewall.ipset
self.ipset_execute = mock.patch.object(self.ipset,
"execute").start()
def test_prepare_remove_port(self):
self.sg_info.return_value = self.devices_info1
self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_member_updated(self):
self.sg_info.return_value = self.devices_info1
self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.sg_info.return_value = self.devices_info2
self.agent.security_groups_member_updated(['security_group1'])
self.agent.prepare_devices_filter(['tap_port2'])
self.sg_info.return_value = self.devices_info1
self.agent.security_groups_member_updated(['security_group1'])
self.agent.remove_devices_filter(['tap_port2'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_rule_updated(self):
self.sg_info.return_value = self.devices_info2
self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self._replay_iptables(IPSET_FILTER_2_3, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
self.sg_info.return_value = self.devices_info3
self.agent.security_groups_rule_updated(['security_group1'])
self._verify_mock_calls()
class SGNotificationTestMixin(object):
def test_security_group_rule_updated(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description):
security_group_id = sg['security_group']['id']
rule = self._build_security_group_rule(
security_group_id,
direction='ingress',
proto=const.PROTO_NAME_TCP)
security_group_rule = self._make_security_group_rule(self.fmt,
rule)
self._delete('security-group-rules',
security_group_rule['security_group_rule']['id'])
self.notifier.assert_has_calls(
[mock.call.security_groups_rule_updated(mock.ANY,
[security_group_id]),
mock.call.security_groups_rule_updated(mock.ANY,
[security_group_id])])
def test_security_group_member_updated(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
security_group_id = sg['security_group']['id']
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[security_group_id]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
security_group_id)
self._delete('ports', port['port']['id'])
self.notifier.assert_has_calls(
[mock.call.security_groups_member_updated(
mock.ANY, [mock.ANY])])
class TestSecurityGroupAgentWithOVSIptables(
TestSecurityGroupAgentWithIptables):
FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER
def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
super(TestSecurityGroupAgentWithOVSIptables, self).setUp(
defer_refresh_firewall,
test_rpc_v1_1)
def _init_agent(self, defer_refresh_firewall):
fake_map = ovs_neutron_agent.LocalVLANMapping(1, 'network_type',
'physical_network', 1)
local_vlan_map = {'fakenet': fake_map}
self.agent = sg_rpc.SecurityGroupAgentRpc(
context=None, plugin_rpc=self.rpc,
local_vlan_map=local_vlan_map,
defer_refresh_firewall=defer_refresh_firewall)
def test_prepare_remove_port(self):
self.rpc.security_group_rules_for_devices.return_value = self.devices1
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEVICE_1)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_member_updated(self):
self.rpc.security_group_rules_for_devices.return_value = self.devices1
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEVICE_1)
self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEVICE_1)
self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEVICE_2)
self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEVICE_2)
self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
IPTABLES_RAW_DEVICE_1)
self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
IPTABLES_RAW_DEFAULT)
self.agent.prepare_devices_filter(['tap_port1'])
self.rpc.security_group_rules_for_devices.return_value = self.devices2
self.agent.security_groups_member_updated(['security_group1'])
self.agent.prepare_devices_filter(['tap_port2'])
self.rpc.security_group_rules_for_devices.return_value = self.devices1
self.agent.security_groups_member_updated(['security_group1'])
self.agent.remove_devices_filter(['tap_port2'])
self.agent.remove_devices_filter(['tap_port1'])
self._verify_mock_calls()
def test_security_group_rule_updated(self):
self.rpc.security_group_rules_for_devices.return_value = self.devices2
self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEVICE_2)
self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
IPTABLES_RAW_DEVICE_2)
self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
self.rpc.security_group_rules_for_devices.return_value = self.devices3
self.agent.security_groups_rule_updated(['security_group1'])
self._verify_mock_calls()
def _regex(self, value):
# Note(nati): 'tap' is prefixed to the device name
# in the OVSHybridIptablesFirewallDriver
value = value.replace('tap_port', 'taptap_port')
value = value.replace('qvbtaptap_port', 'qvbtap_port')
value = value.replace('o_port', 'otap_port')
value = value.replace('i_port', 'itap_port')
value = value.replace('s_port', 'stap_port')
return super(
TestSecurityGroupAgentWithOVSIptables,
self)._regex(value)
class TestSecurityGroupExtensionControl(base.BaseTestCase):
def test_disable_security_group_extension_by_config(self):
set_enable_security_groups(False)
exp_aliases = ['dummy1', 'dummy2']
ext_aliases = ['dummy1', 'security-group', 'dummy2']
sg_rpc.disable_security_group_extension_by_config(ext_aliases)
self.assertEqual(ext_aliases, exp_aliases)
def test_enable_security_group_extension_by_config(self):
set_enable_security_groups(True)
exp_aliases = ['dummy1', 'security-group', 'dummy2']
ext_aliases = ['dummy1', 'security-group', 'dummy2']
sg_rpc.disable_security_group_extension_by_config(ext_aliases)
self.assertEqual(ext_aliases, exp_aliases)
def test_is_invalid_driver_combination_sg_enabled(self):
set_enable_security_groups(True)
set_firewall_driver(FIREWALL_NOOP_DRIVER)
self.assertFalse(sg_rpc._is_valid_driver_combination())
def test_is_invalid_driver_combination_sg_enabled_with_none(self):
set_enable_security_groups(True)
set_firewall_driver(None)
self.assertFalse(sg_rpc._is_valid_driver_combination())
def test_is_invalid_driver_combination_sg_disabled(self):
set_enable_security_groups(False)
set_firewall_driver('NonNoopDriver')
self.assertFalse(sg_rpc._is_valid_driver_combination())
def test_is_valid_driver_combination_sg_enabled(self):
set_enable_security_groups(True)
set_firewall_driver('NonNoopDriver')
self.assertTrue(sg_rpc._is_valid_driver_combination())
def test_is_valid_driver_combination_sg_disabled(self):
set_enable_security_groups(False)
set_firewall_driver(FIREWALL_NOOP_DRIVER)
self.assertTrue(sg_rpc._is_valid_driver_combination())
def test_is_valid_driver_combination_sg_disabled_with_none(self):
set_enable_security_groups(False)
set_firewall_driver(None)
self.assertTrue(sg_rpc._is_valid_driver_combination())
| apache-2.0 | 7,476,213,703,544,293,000 | 45.627169 | 79 | 0.550896 | false |
timj/scons | src/engine/SCons/Tool/rpm.py | 1 | 4470 | """SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should be a
tar.gz consisting of the source file and a specfile.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
tar_file_with_included_specfile = source
if SCons.Util.is_List(source):
tar_file_with_included_specfile = source[0]
return "%s %s %s"%(env['RPM'], env['RPMFLAGS'],
tar_file_with_included_specfile.get_abspath() )
def build_rpm(target, source, env):
# create a temporary rpm build root.
tmpdir = os.path.join( os.path.dirname( target[0].get_abspath() ), 'rpmtemp' )
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
# now create the mandatory rpm directory structure.
for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
os.makedirs( os.path.join( tmpdir, d ) )
# set the topdir as an rpmflag.
env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir )
# now call rpmbuild to create the rpm package.
handle = subprocess.Popen(get_cmd(source, env),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
output = handle.stdout.read()
status = handle.wait()
if status:
raise SCons.Errors.BuildError( node=target[0],
errstr=output,
filename=str(target[0]) )
else:
# XXX: assume that LC_ALL=C is set while running rpmbuild
output_files = re.compile( 'Wrote: (.*)' ).findall( output )
for output, input in zip( output_files, target ):
rpm_output = os.path.basename(output)
expected = os.path.basename(input.get_path())
assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
shutil.copy( output, input.get_abspath() )
# cleanup before leaving.
shutil.rmtree(tmpdir)
return status
def string_rpm(target, source, env):
try:
return env['RPMCOMSTR']
except KeyError:
return get_cmd(source, env)
rpmAction = SCons.Action.Action(build_rpm, string_rpm)
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
source_scanner = SCons.Defaults.DirScanner,
suffix = '$RPMSUFFIX')
def generate(env):
"""Add Builders and construction variables for rpm to an Environment."""
try:
bld = env['BUILDERS']['Rpm']
except KeyError:
bld = RpmBuilder
env['BUILDERS']['Rpm'] = bld
env.SetDefault(RPM = 'LC_ALL=C rpmbuild')
env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta'))
env.SetDefault(RPMCOM = rpmAction)
env.SetDefault(RPMSUFFIX = '.rpm')
def exists(env):
return env.Detect('rpmbuild')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 5,327,432,468,304,638,000 | 32.863636 | 92 | 0.650783 | false |
svieira/Flask-HipPocket | flask_hippocket/tasks.py | 1 | 3301 | # -*- coding: utf-8 -*-
"""
flask.ext.hippocket.tasks
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013 by Sean Vieira.
:license: MIT, see LICENSE for more details.
"""
from flask import Blueprint, Markup, request, render_template
from itertools import chain
from os import path
from pkgutil import walk_packages
from werkzeug.utils import import_string
from werkzeug.exceptions import default_exceptions, HTTPException
def autoload(app, apps_package="apps", module_name="routes", blueprint_name="routes", on_error=None):
"""Automatically load Blueprints from the specified package and registers them with Flask."""
if not apps_package:
raise ValueError("No apps package provided - unable to begin autoload")
if isinstance(apps_package, basestring):
package_code = import_string(apps_package)
else:
#: `apps_package` can be the already imported parent package
#: (i.e. the following is a licit pattern)::
#:
#: import app_package
#: # do something else with app_package
#: autoload(app, app_package)
package_code = apps_package
apps_package = apps_package.__name__
package_paths = package_code.__path__
package_paths = [path.join(app.root_path, p) for p in package_paths]
root = apps_package
apps_package = apps_package + u"." if not apps_package.endswith(".") else apps_package
if on_error is None:
on_error = lambda name: app.logger.warn("Unable to import {name}.".format(name=name))
_to_import = "{base}.{module}.{symbol}"
import_template = lambda base: _to_import.format(base=base,
module=module_name,
symbol=blueprint_name)
#: Autoloaded apps must be Python packages
#: The root of the package is also inspected for a routing file
package_contents = chain([[None, root, True]],
walk_packages(path=package_paths, prefix=apps_package, onerror=on_error))
for _, sub_app_name, is_pkg in package_contents:
if not is_pkg:
continue
sub_app_import_path = import_template(base=sub_app_name)
sub_app = import_string(sub_app_import_path)
if isinstance(sub_app, Blueprint):
app.register_blueprint(sub_app)
else:
app.logger.warn(("Failed to register {name} - "
"it does not match the registration pattern.").format(name=sub_app_name))
def setup_errors(app, error_template="errors.html"):
"""Add a handler for each of the available HTTP error responses."""
def error_handler(error):
if isinstance(error, HTTPException):
description = error.get_description(request.environ)
code = error.code
name = error.name
else:
description = error
code = 500
name = "Internal Server Error"
return render_template(error_template,
code=code,
name=Markup(name),
description=Markup(description))
for exception in default_exceptions:
app.register_error_handler(exception, error_handler)
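# Illustrative usage of setup_errors (a sketch only): call it once during app
# setup, e.g. ``setup_errors(app, error_template="errors.html")``, and provide
# an errors.html template that renders the ``code``, ``name`` and
# ``description`` variables passed to render_template above.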
| mit | -5,610,174,353,615,824,000 | 37.383721 | 105 | 0.604059 | false |
mscherer/rpmlint | InitScriptCheck.py | 1 | 11946 | # -*- coding: utf-8 -*-
#############################################################################
# Project : Mandriva Linux
# Module : rpmlint
# File : InitScriptCheck.py
# Author : Frederic Lepied
# Created On : Fri Aug 25 09:26:37 2000
# Purpose : check init scripts (files in /etc/rc.d/init.d)
#############################################################################
import os
import re
import sys
import rpm
from Filter import addDetails, printError, printWarning
import AbstractCheck
import Config
import Pkg
chkconfig_content_regex = re.compile('^\s*#\s*chkconfig:\s*([-0-9]+)\s+[-0-9]+\s+[-0-9]+')
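# Illustrative chkconfig header lines this regex is intended to match:
#   # chkconfig: 345 85 15
#   # chkconfig: - 85 15    (a "-" start level means no default runlevels)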
subsys_regex = re.compile('/var/lock/subsys/([^/"\'\n\s;&|]+)', re.MULTILINE)
chkconfig_regex = re.compile('^[^#]*(chkconfig|add-service|del-service)', re.MULTILINE)
status_regex = re.compile('^[^#]*status', re.MULTILINE)
reload_regex = re.compile('^[^#]*reload', re.MULTILINE)
use_deflevels = Config.getOption('UseDefaultRunlevels', True)
lsb_tags_regex = re.compile('^# ([\w-]+):\s*(.*?)\s*$')
lsb_cont_regex = re.compile('^#(?:\t| )(.*?)\s*$')
use_subsys = Config.getOption('UseVarLockSubsys', True)
LSB_KEYWORDS = ('Provides', 'Required-Start', 'Required-Stop', 'Should-Start',
'Should-Stop', 'Default-Start', 'Default-Stop',
'Short-Description', 'Description')
RECOMMENDED_LSB_KEYWORDS = ('Provides', 'Required-Start', 'Required-Stop',
'Default-Stop', 'Short-Description')
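# Illustrative LSB comment block that the parsing below expects to find in an
# init script (values are examples only):
#   ### BEGIN INIT INFO
#   # Provides:          foo
#   # Required-Start:    $network
#   # Required-Stop:     $network
#   # Default-Start:
#   # Default-Stop:      0 1 2 6
#   # Short-Description: foo daemon
#   ### END INIT INFO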
class InitScriptCheck(AbstractCheck.AbstractCheck):
def __init__(self):
AbstractCheck.AbstractCheck.__init__(self, 'InitScriptCheck')
def check_binary(self, pkg):
initscript_list = []
for fname, pkgfile in pkg.files().items():
if not fname.startswith('/etc/init.d/') and \
not fname.startswith('/etc/rc.d/init.d/'):
continue
basename = os.path.basename(fname)
initscript_list.append(basename)
if pkgfile.mode & int("500", 8) != int("500", 8):
printError(pkg, 'init-script-non-executable', fname)
if "." in basename:
printError(pkg, 'init-script-name-with-dot', fname)
# check chkconfig call in %post and %preun
postin = pkg[rpm.RPMTAG_POSTIN] or \
pkg.scriptprog(rpm.RPMTAG_POSTINPROG)
if not postin:
printError(pkg, 'init-script-without-chkconfig-postin', fname)
elif not chkconfig_regex.search(postin):
printError(pkg, 'postin-without-chkconfig', fname)
preun = pkg[rpm.RPMTAG_PREUN] or \
pkg.scriptprog(rpm.RPMTAG_PREUNPROG)
if not preun:
printError(pkg, 'init-script-without-chkconfig-preun', fname)
elif not chkconfig_regex.search(preun):
printError(pkg, 'preun-without-chkconfig', fname)
status_found = False
reload_found = False
chkconfig_content_found = False
subsys_regex_found = False
in_lsb_tag = False
in_lsb_description = False
lastline = ''
lsb_tags = {}
# check common error in file content
content = None
try:
content = [x for x in Pkg.readlines(pkgfile.path)]
except Exception:
e = sys.exc_info()[1]
printWarning(pkg, 'read-error', e)
continue
content_str = "".join(content)
for line in content:
line = line[:-1] # chomp
# TODO check if there is only one line like this
if line.startswith('### BEGIN INIT INFO'):
in_lsb_tag = True
continue
if line.endswith('### END INIT INFO'):
in_lsb_tag = False
for kw, vals in lsb_tags.items():
if len(vals) != 1:
printError(pkg, 'redundant-lsb-keyword', kw)
for kw in RECOMMENDED_LSB_KEYWORDS:
if kw not in lsb_tags:
printWarning(pkg, 'missing-lsb-keyword',
"%s in %s" % (kw, fname))
if in_lsb_tag:
# TODO maybe we do not have to handle this ?
if lastline.endswith('\\'):
line = lastline + line
else:
res = lsb_tags_regex.search(line)
if not res:
cres = lsb_cont_regex.search(line)
if not (in_lsb_description and cres):
in_lsb_description = False
printError(
pkg, 'malformed-line-in-lsb-comment-block',
line)
else:
lsb_tags["Description"][-1] += \
" " + cres.group(1)
else:
tag = res.group(1)
if not tag.startswith('X-') and \
tag not in LSB_KEYWORDS:
printError(pkg, 'unknown-lsb-keyword', line)
else:
in_lsb_description = (tag == 'Description')
if tag not in lsb_tags:
lsb_tags[tag] = []
lsb_tags[tag].append(res.group(2))
lastline = line
if not status_found and status_regex.search(line):
status_found = True
if not reload_found and reload_regex.search(line):
reload_found = True
res = chkconfig_content_regex.search(line)
if res:
chkconfig_content_found = True
if use_deflevels:
if res.group(1) == '-':
printWarning(pkg, 'no-default-runlevel', fname)
elif res.group(1) != '-':
printWarning(pkg, 'service-default-enabled', fname)
res = subsys_regex.search(line)
if res:
subsys_regex_found = True
name = res.group(1)
if use_subsys and name != basename:
error = True
if name[0] == '$':
value = Pkg.substitute_shell_vars(name,
content_str)
if value == basename:
error = False
else:
i = name.find('}')
if i != -1:
name = name[0:i]
error = name != basename
if error and len(name):
if name[0] == '$':
printWarning(pkg, 'incoherent-subsys', fname,
name)
else:
printError(pkg, 'incoherent-subsys', fname,
name)
if "Default-Start" in lsb_tags:
if "".join(lsb_tags["Default-Start"]):
printWarning(pkg, 'service-default-enabled', fname)
if not status_found:
printError(pkg, 'no-status-entry', fname)
if not reload_found:
printWarning(pkg, 'no-reload-entry', fname)
if not chkconfig_content_found:
printError(pkg, 'no-chkconfig-line', fname)
if not subsys_regex_found and use_subsys:
printError(pkg, 'subsys-not-used', fname)
elif subsys_regex_found and not use_subsys:
printError(pkg, 'subsys-unsupported', fname)
if len(initscript_list) == 1:
pkgname = re.sub("-sysvinit$", "", pkg.name.lower())
goodnames = (pkgname, pkgname + 'd')
if initscript_list[0] not in goodnames:
printWarning(pkg, 'incoherent-init-script-name',
initscript_list[0], str(goodnames))
# Create an object to enable the auto registration of the test
check = InitScriptCheck()
addDetails(
'init-script-without-chkconfig-postin',
'''The package contains an init script but doesn't contain a %post with
a call to chkconfig.''',
'postin-without-chkconfig',
'''The package contains an init script but doesn't call chkconfig in its
%post script.''',
'init-script-without-chkconfig-preun',
'''The package contains an init script but doesn't contain a %preun with
a call to chkconfig.''',
'preun-without-chkconfig',
'''The package contains an init script but doesn't call chkconfig in its
%preun script.''',
'missing-lsb-keyword',
'''The package contains an init script that does not contain one of the LSB
init script comment block convention keywords that are recommendable for all
init scripts. If there is nothing to add to a keyword's value, include the
keyword in the script with an empty value. Note that as of version 3.2, the
LSB specification does not mandate presence of any keywords.''',
'no-status-entry',
'''In your init script (/etc/rc.d/init.d/your_file), you don't
have a 'status' entry, which is necessary for good functionality.''',
'no-reload-entry',
'''In your init script (/etc/rc.d/init.d/your_file), you don't
have a 'reload' entry, which is necessary for good functionality.''',
'no-chkconfig-line',
'''The init script doesn't contain a chkconfig line to specify the runlevels
at which to start and stop it.''',
'no-default-runlevel',
'''The default runlevel isn't specified in the init script.''',
'service-default-enabled',
'''The service is enabled by default after "chkconfig --add"; for security
reasons, most services should not be. Use "-" as the default runlevel in the
init script's "chkconfig:" line and/or remove the "Default-Start:" LSB keyword
to fix this if appropriate for this service.''',
'subsys-unsupported',
'''The init script uses /var/lock/subsys which is not supported by
this distribution.''',
'subsys-not-used',
'''While your daemon is running, you have to put a lock file in
/var/lock/subsys/. To see an example, look at this directory on your
machine and examine the corresponding init scripts.''',
'incoherent-subsys',
'''The filename of your lock file in /var/lock/subsys/ is incoherent
with your actual init script name. For example, if your script name
is httpd, you have to use 'httpd' as the filename in your subsys directory.
It is also possible that rpmlint gets this wrong, especially if the init
script contains nontrivial shell variables and/or assignments. These
cases usually manifest themselves when rpmlint reports that the subsys name
starts a with '$'; in these cases a warning instead of an error is reported
and you should check the script manually.''',
'incoherent-init-script-name',
'''The init script name should be the same as the package name in lower case,
or one with 'd' appended if it invokes a process by that name.''',
'init-script-name-with-dot',
'''The init script name should not contain a dot in its name. Some versions
of chkconfig don't work as expected with init script names like that.''',
'init-script-non-executable',
'''The init script should have at least the execution bit set for root
in order for it to run at boot time.''',
)
# InitScriptCheck.py ends here
# Local variables:
# indent-tabs-mode: nil
# py-indent-offset: 4
# End:
# ex: ts=4 sw=4 et
| gpl-2.0 | -7,705,326,315,198,297,000 | 40.915789 | 90 | 0.536665 | false |
westurner/opengov | docs/conf.py | 1 | 14116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# add the _ext/ dir for local extensions
sys.path.insert(1, os.path.abspath('_ext'))
# configure paths and names once
project_name = "opengov"
project_name_slug = "opengov"
project_orgname = "westurner"
project_twitter_user = project_orgname
project_author = u"Wes Turner"
project_copyright = u'2014, {}'.format(project_author)
project_github_path = "{}/{}".format(project_orgname, project_name)
project_github_url = "https://github.com/{}".format(project_github_path)
project_src_url = project_github_url
project_src_path = project_github_path
project_url = project_github_url
project_title = u'{} Documentation'.format(project_name)
project_description_oneline = (
u'Documentation for the {} project'.format(project_name))
project_og_site_name = project_src_path # e.g. westurner/opengov
srclink_project = project_github_url
srclink_src_path = 'docs/'
srclink_branch = 'develop'
# current_git_branch=subprocess.check_output("git b") && parse
import opengov
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
#'sphinxcontrib.ansi',
#'sphinxcontrib.programoutput',
#'sphinxcontrib.issuetracker',
'sphinxcontrib.srclinks'
]
try:
import sphinx_git
extensions.append('sphinx_git')
except ImportError:
pass
# TODO
try:
import sphinxcontrib.issuetracker
extensions.append('sphinxcontrib.issuetracker')
issuetracker = 'github'
issuetracker_project = project_github_path
except ImportError:
pass
# TODO
try:
import changelog
extensions.append('changelog')
# section names - optional
changelog_sections = ["general", "rendering", "tests"]
# tags to sort on inside of sections - also optional
changelog_inner_tag_sort = ["feature", "bug"]
changelog_url = project_github_url
# how to render changelog links - these are plain
# python string templates, ticket/pullreq/changeset number goes
# in "%s"
changelog_render_ticket = "%s/issue/%%s" % changelog_url
changelog_render_pullreq = "%s/pullrequest/%%s" % changelog_url
changelog_render_changeset = "%s/changeset/%%s" % changelog_url
except ImportError:
#print("ERROR: failed to import 'changelog'")
pass
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = project_src_path
copyright = project_copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = opengov.__version__
# The full version, including alpha/beta/rc tags.
release = opengov.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------
# see: http://git.io/Pk7SGA
# on_rtd is whether we are on readthedocs.org, this line of code grabbed
# from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#if not on_rtd: # only import and set the theme if we're building docs locally
#import sphinx_rtd_theme
## The theme to use for HTML and HTML Help pages. See the documentation for
## a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
## Add any paths that contain custom themes here, relative to this
## directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basicstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'lang': 'en',
'nosidebar': False,
'rightsidebar': False,
'sidebar_span': 3,
'nav_fixed_top': False,
'nav_fixed': False,
'nav_width': '900px',
'content_fixed': False,
'content_width': '768px',
'row_fixed': False,
'noresponsive': False,
'noresponsiverelbar': True,
'googlewebfont': False,
'googlewebfont_url': 'http://fonts.googleapis.com/css?family=Lily+Script+One',
'googlewebfont_style': u"font-family: 'Lily Script One' cursive;",
'header_inverse': False,
'relbar_inverse': False,
'inner_theme': False,
'inner_theme_name': 'bootswatch-readable',
# 'h1_size': '3.0em',
# 'h2_size': '2.6em',
# 'h3_size': '2.2em',
# 'h4_size': '1.8em',
# 'h5_size': '1.4em',
# 'h6_size': '1.1em',
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'searchbox.html',
'srclinks.html',
'links.html',
],
'index': [
'globaltoc.html',
'relations.html',
'searchbox.html',
'srclinks.html',
'links.html',
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# see: https://pypi.python.org/pypi/pgs (to auto-append .html to paths)
html_link_suffix = ''
# Filename affix
filename_affix = "{}".format(project_name_slug)
# Output file base name for HTML help builder.
htmlhelp_basename = '{}-doc'.format(filename_affix)
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'{}.tex'.format(filename_affix),
project_title,
project_author,
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index',
project_name_slug,
project_title,
[project_author],
1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
project_name_slug,
project_title,
project_author,
project_name,
project_description_oneline,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def configure_meta_tags(app, pagename, templatename, context, doctree):
metatags = context.get('metatags', '')
metatags += """
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- OpenGraph metadata: ogp.me -->
<meta property="og:title" content="{title}" />
<meta property="og:type" content="website" />
<meta property="og:site_name" content="{og_site_name}" />
<!--
<meta property="og:description" content="{description}" />
-->
<meta property="og:image" content="{og_image_url}" />
<meta property="og:image:width" content="{og_image_width}" />
<meta property="og:image:height" content="{og_image_height}" />
<!--
<meta property="og:image:secure_url" content="./_static/img/logo.png" />
-->
<!-- Twitter metadata -->
<meta property="twitter:card" content="summary" />
<meta property="twitter:title" content="{title}" />
<meta property="twitter:description" content="{description}" />
<meta property="twitter:site" content="{twitter_user}" />
<meta property="twitter:creator" content="{twitter_user}" />
""".format(
title=context.get('title',''),
description=context.get('description', project_description_oneline),
og_site_name=project_og_site_name,
og_image_url="", # 470x242.png
og_image_width="470",
og_image_height="242",
twitter_user=project_twitter_user)
context['metatags'] = metatags
def setup(app):
app.add_javascript('js/local.js')
app.add_stylesheet('css/local.css')
app.connect('html-page-context', configure_meta_tags)
if __name__ == "__main__":
context = {}
output = configure_meta_tags(None, None, None, context, None)
print(context.get('metatags'))
| bsd-3-clause | -5,590,527,293,191,413,000 | 30.368889 | 82 | 0.680929 | false |
olsonse/python-vxi11 | doc/conf.py | 1 | 8582 | # -*- coding: utf-8 -*-
#
# VXI-11 for Python documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 7 11:57:43 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
DOCROOT = os.path.dirname(__file__)
sys.path.insert(0,
os.path.abspath(os.path.join(DOCROOT, os.path.pardir))
)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'VXI-11 for Python'
copyright = u'2017, Spencer E. Olson, Jonathan A. Eugenio'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'VXI-11forPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'VXI-11forPython.tex', u'VXI-11 for Python Documentation',
u'Spencer E. Olson, Jonathan A. Eugenio', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'vxi-11forpython', u'VXI-11 for Python Documentation',
[u'Spencer E. Olson, Jonathan A. Eugenio'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'VXI-11forPython', u'VXI-11 for Python Documentation',
u'Spencer E. Olson, Jonathan A. Eugenio', 'VXI-11forPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-2.0 | 6,429,391,774,352,015,000 | 30.785185 | 99 | 0.706246 | false |