repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
bearops/ebzl | ebzl/modules/ecs.py | 1 | 4547 | from .. lib import (
ecs,
format as fmt,
parameters
)
from . import (
version
)
import os
import json
import argparse
def get_argument_parser():
parser = argparse.ArgumentParser("ebzl ecs")
    parameters.add_profile(parser, required=False)
parameters.add_region(parser, required=False)
subparsers = parser.add_subparsers()
# ebzl ecs create
create_parser = subparsers.add_parser(
"create",
help="register a new task")
create_parser.set_defaults(func=create_task)
create_parser.add_argument("--family", required=True)
create_parser.add_argument("--name", required=True)
create_parser.add_argument("--image", required=True)
create_parser.add_argument("--version", default=version.get_version())
create_parser.add_argument("--command", default="")
create_parser.add_argument("--entrypoint", default=[])
create_parser.add_argument("--cpu", default=0)
create_parser.add_argument("--memory", default=250)
create_parser.add_argument("-v", "--var", action="append")
create_parser.add_argument("-f", "--var-file")
# ebzl ecs run
run_parser = subparsers.add_parser(
"run",
help="run registered task")
run_parser.set_defaults(func=run_task)
run_parser.add_argument("--task", required=True)
run_parser.add_argument("--cluster", default="default")
run_parser.add_argument("--command")
run_parser.add_argument("-v", "--var", action="append")
run_parser.add_argument("-f", "--var-file")
# ebzl ecs tasks
tasks_parser = subparsers.add_parser(
"tasks",
help="list available tasks")
tasks_parser.set_defaults(func=list_tasks)
# ebzl ecs clusters
clusters_parser = subparsers.add_parser(
"clusters",
help="list available clusters")
clusters_parser.set_defaults(func=list_clusters)
return parser
def parse_var_entry(var_entry):
parts = var_entry.strip().split("=")
return {"name": parts[0],
"value": "=".join(parts[1:])}
def parse_var_file(fpath):
if not fpath or not os.path.isfile(fpath):
return []
with open(os.path.expanduser(fpath), "rb") as f:
return map(parse_var_entry, f.readlines())
def get_env_options(args):
env_options = []
env_options.extend(parse_var_file(args.var_file))
if args.var:
env_options.extend(map(parse_var_entry, args.var))
return env_options
def get_container_definition(args):
return {
"name": args.name,
"image": "%s:%s" % (args.image, args.version),
"mountPoints": [],
"volumesFrom": [],
"portMappings": [],
"command": map(str.strip, args.command.split()),
"essential": True,
"entryPoint": args.entrypoint,
"links": [],
"cpu": int(args.cpu),
"memory": int(args.memory),
"environment": get_env_options(args)
}
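# Illustrative result (argument values are assumed): with --name web
# --image repo/app --version 1.2 this returns a definition containing
# {"name": "web", "image": "repo/app:1.2", "cpu": 0, "memory": 250, ...}.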
def create_task(args):
conn = ecs.get_conn(profile=args.profile)
conn.register_task_definition(
family="AtlasCron",
containerDefinitions=[get_container_definition(args)])
def run_task(args):
conn = ecs.get_conn(profile=args.profile)
kwargs = {
"cluster": args.cluster,
"taskDefinition": args.task,
"count": 1,
}
if args.command or args.var or args.var_file:
overrides = {}
task = conn.describe_task_definition(taskDefinition=args.task)
overrides["name"] = (task["taskDefinition"]
["containerDefinitions"]
[0]
["name"])
if args.command:
overrides["command"] = map(str.strip, args.command.split())
env_options = get_env_options(args)
if env_options:
overrides["environment"] = env_options
kwargs["overrides"] = {"containerOverrides": [overrides]}
print conn.run_task(**kwargs)
def list_tasks(args):
conn = ecs.get_conn(profile=args.profile)
tasks = conn.list_task_definitions()
fmt.print_list([arn.split("/")[-1]
for arn in tasks["taskDefinitionArns"]])
def list_clusters(args):
conn = ecs.get_conn(profile=args.profile)
clusters = conn.list_clusters()
fmt.print_list([arn.split("/")[-1]
for arn in clusters["clusterArns"]])
def run(argv):
args = parameters.parse(
parser=get_argument_parser(),
argv=argv,
postprocessors=[parameters.add_default_region])
args.func(args)
| bsd-3-clause | -6,729,863,579,245,356,000 | 26.72561 | 74 | 0.607653 | false |
rjungbeck/rasterizer | mupdf12.py | 1 | 2411 | from mupdfbase import MuPdfBase, Matrix, Rect, BBox
from ctypes import cdll, c_float, c_int, c_void_p, Structure, c_char_p, POINTER
FZ_STORE_UNLIMITED=0
class MuPdf(MuPdfBase):
def __init__(self):
self.dll=cdll.libmupdf
self.dll.fz_bound_page.argtypes=[c_void_p, c_void_p, POINTER(Rect)]
self.dll.fz_bound_page.restype=POINTER(Rect)
self.dll.fz_new_pixmap_with_bbox.argtypes=[c_void_p, c_void_p, POINTER(BBox)]
self.dll.fz_new_pixmap_with_bbox.restype=c_void_p
self.dll.fz_run_page.argtypes=[c_void_p, c_void_p, c_void_p, POINTER(Matrix), c_void_p]
self.dll.fz_run_page.restype=None
self.dll.fz_write_pam.argtypes=[c_void_p, c_void_p, c_char_p, c_int]
self.dll.fz_write_pam.restype=None
self.dll.fz_write_pbm.argtypes=[c_void_p, c_void_p, c_char_p]
self.dll.fz_write_pbm.restype=None
self.dll.fz_count_pages.argtypes=[c_void_p]
self.dll.fz_count_pages.restype=c_int
self.dll.fz_open_document_with_stream.argtypes=[c_void_p, c_char_p, c_void_p]
self.dll.fz_open_document_with_stream.restype=c_void_p
self.dll.fz_close_document.argtypes=[c_void_p]
self.dll.fz_close_document.restype=None
self.dll.fz_free_page.argtypes=[c_void_p, c_void_p]
self.dll.fz_free_page.restype=None
self.dll.fz_find_device_colorspace.argtypes=[c_void_p, c_char_p]
self.dll.fz_find_device_colorspace.restype=c_void_p
MuPdfBase.__init__(self)
def getSize(self):
rect=Rect()
self.dll.fz_bound_page(self.doc, self.page,rect)
return rect.x0, rect.y0, rect.x1, rect.y1
def getPageCount(self):
return self.dll.fz_count_pages(self.doc)
def loadPage(self, num):
self.page=self.dll.fz_load_page(self.doc, num-1)
def runPage(self, dev, transform):
self.dll.fz_run_page(self.doc, self.page, dev, transform, None)
def freePage(self):
self.dll.fz_free_page(self.doc, self.page)
self.page=None
def loadDocument(self, context, stream):
self.doc=self.dll.fz_open_document_with_stream(self.context, "application/pdf", self.stream)
def closeDocument(self):
if self.doc:
self.dll.fz_close_document(self.doc)
self.doc=None
def findColorspace(self, colorSpace):
return self.dll.fz_find_device_colorspace(self.context, colorSpace)
def setContext(self):
self.context=self.dll.fz_new_context(None, None, FZ_STORE_UNLIMITED)
| agpl-3.0 | -1,224,500,625,628,163,300 | 28.935897 | 94 | 0.68229 | false |
bsmedberg/socorro | socorro/webapi/servers.py | 1 | 3703 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import web
import os
from socorro.webapi.classPartial import classWithPartialInit
from configman import Namespace, RequiredConfig
#==============================================================================
class WebServerBase(RequiredConfig):
required_config = Namespace()
#--------------------------------------------------------------------------
def __init__(self, config, services_list):
self.config = config
urls = []
for each in services_list:
if hasattr(each, 'uri'):
# this is the old middleware
uri, cls = each.uri, each
else:
# this is the new middleware_app
uri, cls = each
urls.append(uri)
urls.append(classWithPartialInit(cls, config))
self.urls = tuple(urls)
web.webapi.internalerror = web.debugerror
web.config.debug = False
self._identify()
self._wsgi_func = web.application(self.urls, globals()).wsgifunc()
#--------------------------------------------------------------------------
def run(self):
raise NotImplemented
#--------------------------------------------------------------------------
def _identify(self):
pass
#==============================================================================
class ApacheModWSGI(WebServerBase):
"""When running Apache, modwsgi requires a reference to a "wsgifunc" In
this varient of the WebServer class, the run function returns the result of
the webpy framework's wsgifunc. Applications that use this class must
provide a module level variable 'application' in the module given to Apache
modwsgi configuration. The value of the variable must be the _wsgi_func.
"""
#--------------------------------------------------------------------------
def run(self):
return self._wsgi_func
#--------------------------------------------------------------------------
def _identify(self):
self.config.logger.info('this is ApacheModWSGI')
#--------------------------------------------------------------------------
@staticmethod
def get_socorro_config_path(wsgi_file):
wsgi_path = os.path.dirname(os.path.realpath(wsgi_file))
config_path = os.path.join(wsgi_path, '..', 'config')
return os.path.abspath(config_path)
#==============================================================================
class StandAloneServer(WebServerBase):
required_config = Namespace()
required_config.add_option(
'port',
doc='the port to listen to for submissions',
default=8882
)
#==============================================================================
class CherryPy(StandAloneServer):
required_config = Namespace()
required_config.add_option(
'ip_address',
doc='the IP address from which to accept submissions',
default='127.0.0.1'
)
#--------------------------------------------------------------------------
def run(self):
web.runsimple(
self._wsgi_func,
(self.config.web_server.ip_address, self.config.web_server.port)
)
#--------------------------------------------------------------------------
def _identify(self):
self.config.logger.info(
'this is CherryPy from web.py running standalone at %s:%d',
self.config.web_server.ip_address,
self.config.web_server.port
)
| mpl-2.0 | -4,690,380,834,921,110,000 | 34.605769 | 79 | 0.47286 | false |
lgiordani/punch | punch/vcs_repositories/git_flow_repo.py | 1 | 3464 | from __future__ import print_function, absolute_import, division
import subprocess
import os
import six
from punch.vcs_repositories import git_repo as gr
from punch.vcs_repositories.exceptions import (
RepositoryStatusError,
RepositorySystemError
)
class GitFlowRepo(gr.GitRepo):
def __init__(self, working_path, config_obj, files_to_commit=None):
if six.PY2:
super(GitFlowRepo, self).__init__(
working_path, config_obj, files_to_commit)
else:
super().__init__(working_path, config_obj, files_to_commit)
self.release_branch = "release/{}".format(
self.config_obj.options['new_version']
)
def _set_command(self):
self.commands = ['git', 'flow']
self.command = 'git'
def _check_system(self):
# git flow -h returns 1 so the call fails
p = subprocess.Popen(
self.commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
if "git flow <subcommand>" not in stdout.decode('utf8'):
raise RepositorySystemError("Cannot run {}".format(self.commands))
if not os.path.exists(os.path.join(self.working_path, '.git')):
raise RepositorySystemError(
"The current directory {} is not a Git repository".format(
self.working_path))
def pre_start_release(self):
output = self._run([self.command, "status"])
if "Changes to be committed:" in output:
raise RepositoryStatusError(
("Cannot start release while repository "
"contains uncommitted changes")
)
self._run([self.command, "checkout", "develop"])
branch = self.get_current_branch()
if branch != "develop":
raise RepositoryStatusError(
"Current branch shall be develop but is {}".format(branch))
def start_release(self):
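        # Roughly equivalent to running "git flow release start <new_version>"
        # (e.g. "git flow release start 1.2.3" -- version shown is
        # illustrative), which creates a release/<new_version> branch.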
self._run(
self.commands + [
"release",
"start",
self.config_obj.options['new_version']
])
def finish_release(self):
branch = self.get_current_branch()
command = [self.command, "add"]
if self.config_obj.include_all_files:
command.append(".")
else:
command.extend(self.config_obj.include_files)
command.extend(self.files_to_commit)
self._run(command)
output = self._run([self.command, "status"])
if "nothing to commit, working directory clean" in output or \
"nothing to commit, working tree clean" in output:
self._run([self.command, "checkout", "develop"])
self._run([self.command, "branch", "-d", branch])
return
message = ["-m", self.config_obj.commit_message]
command_line = [self.command, "commit"]
command_line.extend(message)
self._run(command_line)
self._run(
self.commands + [
"release",
"finish",
"-m",
branch,
self.config_obj.options['new_version']
])
def post_finish_release(self):
pass
def get_info(self):
return [
("Commit message", self.config_obj.commit_message),
("Release branch", self.release_branch),
]
| isc | -688,748,351,345,906,800 | 28.862069 | 78 | 0.556582 | false |
MaisamArif/NEST | backend/tmp_markov_framework/markov_script.py | 1 | 1912 | import numpy as np
import random
def normalize(arr):
s = sum(arr)
if s == 0:
s = 1
arr[0] = 1
for i, val in enumerate(arr):
arr[i] = val/s
def generate(width, height):
matrix = []
for i in range(height):
matrix.append([])
for j in range(width):
matrix[i].append(float(random.randint(0, 1000))/1000)
normalize(matrix[i])
matrix[i] = [round(x, 3) for x in matrix[i]]
return np.matrix(matrix)
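# Example (illustrative): generate(4, 4) returns a 4x4 np.matrix whose rows
# each sum to (approximately) 1, i.e. a random row-stochastic transition
# matrix.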
def initialize(soc0, soc1):
matricies = []
for i in range(4):
matricies.append(generate(4,4))
#format is as follows [P0, IM0, P1, IM1]
P0, IM0, P1, IM1 = matricies
vm1 = IM1[0:,0] * IM0[0,0:] + IM1[0:,1] * IM0[1,0:] + IM1[0:,2] * IM0[2,0:] +IM1[0:,3] * IM0[3,0:]
vm2 = IM0[0:,0] * IM1[0,0:] + IM0[0:,1] * IM1[1,0:] + IM0[0:,2] * IM1[2,0:] +IM0[0:,3] * IM1[3,0:]
c0_to_c1 = ((1-soc0) * P0)+ (soc0 * vm1)
c1_to_c0 = ((1-soc1) * P1)+ (soc1 * vm2)
matricies.append(c0_to_c1)
matricies.append(c1_to_c0)
if random.randint(0,1) == 1:
position_and_direction = ['right', 'left', 'left', 'right']
else:
position_and_direction = ['left', 'right', 'right', 'left']
return matricies#, position_and_direction
def traverse(matrix):
rand = float(random.randint(0,1000))/1000
count = 0
for i, elem in enumerate(matrix):
if rand > count and rand < count + elem:
return i
count += elem
return len(matrix) - 1
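# Example (illustrative): traverse([0.1, 0.2, 0.3, 0.4]) returns an index
# 0-3, picking i with probability roughly equal to the i-th entry.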
def continue_chain(emo1, emo2, matricies):
T1, T2 = matricies
if random.randint(0,1) == 1:
position_and_direction = ['right', 'left', 'left', 'right']
else:
position_and_direction = ['left', 'right', 'right', 'left']
return (traverse(T1.A[emo1]), traverse(T2.A[emo2]))#, position_and_direction
if __name__ == "__main__":
pass
def main():
pass
| gpl-3.0 | -5,248,547,207,103,633,000 | 23.202532 | 103 | 0.544979 | false |
UCSD-AUVSI/Heimdall | Recognition/ColorClassifier/PythonColorClassifier/ColorClassifier/Python/main.py | 1 | 3405 | import cv2
import math
import numpy as np
import pickle
import sys
import os
import platform
def getColor(color, testCode):
print "starting Get Color"
try:
PATH = "Recognition/ColorClassifier/PythonColorClassifier/ColorClassifier/Python/"
color_db=pickle.load(open(PATH+"color_db.p","rb"))
except :
print "Exception "+ str(sys.exc_info()[0])
raise BaseException
cielab_output = []
name = []
check=[]
for dictionary in color_db:
cielab=dictionary["lab"]
cielab_output.append(cielab)
#add name to names array
name.append(dictionary["name"])
check.append({"cielab":cielab,"name":dictionary["name"]})
#put cielab data into matrix
trainData=np.matrix(cielab_output, dtype=np.float32)
#put names which are numbers right now into a matrix
responses = np.matrix(name, dtype=np.float32)
#turn test point into matrix
newcomer=np.matrix(color, dtype=np.float32)
knn = cv2.KNearest()
# train the data
knn.train(trainData,responses)
# find nearest
ret, results, neighbours ,dist = knn.find_nearest(newcomer, 3)
output = ""
#get results
if testCode == 1 or testCode == 2:
print "result: ", results,"\n"
print "neighbours: ", neighbours,"\n"
print "distance: ", dist
for name,val in COLOR_TO_NUMBER.iteritems():
if val==int(results[0][0]):
output = name
if testCode == 2:
print output
#Check Answer
blank_image=np.zeros((100,100,3),np.uint8)
blank_image[:]=newcomer
blank_image = cv2.cvtColor(blank_image,cv2.COLOR_LAB2BGR)
cv2.imshow("test",blank_image)
cv2.waitKey(0)
print "Color: "+output
print ""
return output
COLOR_TO_NUMBER = {"White":1,"Black":2,"Red":3,"Orange":4,"Yellow":5,"Blue":6,"Green":7,"Purple":8,"Pink":9,"Brown":10,"Grey":11,"Teal":12}
def bgr_to_lab(bgr):
#create blank image 1x1 pixel
blank_image=np.zeros((1,1,3),np.uint8)
#set image pixels to bgr input
blank_image[:]= bgr
#turn into LAB
try:
cielab = cv2.cvtColor(blank_image,cv2.COLOR_BGR2LAB)
except :
print "Exception "+ str(sys.exc_info()[0])
raise BaseException
return cielab[0][0]
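# Example (illustrative): bgr_to_lab((255, 0, 0)) returns the 8-bit OpenCV
# Lab triple for pure blue (BGR order) as a length-3 numpy array.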
def lab_to_bgr(lab):
#create blank image 1x1 pixel
blank_image=np.zeros((1,1,3),np.uint8)
#set image pixels to bgr input
blank_image[:]= lab
#turn into LAB
bgr = cv2.cvtColor(blank_image,cv2.COLOR_LAB2BGR)
return bgr[0][0]
def rgb_to_bgr(rgb):
return tuple(reversed(rgb))
def doColorClassification(givenSColor, givenCColor, optionalArgs):
print "Python Color Classification (this is the Python)\n"
#print sys.version
#print platform.python_version()
#print cv2.__version__
#print [method for method in dir(cv2) if callable(getattr(cv2, method))]
print givenSColor
if len(givenSColor) != 3:
print "WARNING: SColor wasn't a 3-element list!!!"
if len(givenCColor) != 3:
print "WARNING: CColor wasn't a 3-element list!!!"
bgrS = rgb_to_bgr(givenSColor)
bgrC = rgb_to_bgr(givenCColor)
labS = bgr_to_lab(bgrS)
labC = bgr_to_lab(bgrC)
print "----------------------------------------"
print "RGB SColor: "+str(givenSColor)
print "BGR SColor: "+str(bgrS)
print "Lab SColor: "+ str(labS)
returnedSColor = getColor(labS, 0)
print "----------------------------------------"
print "RGB CColor: "+str(givenCColor)
print "BGR SColor: "+str(bgrS)
print "Lab CColor: "+ str(labC)
returnedCColor = getColor(labC, 0)
print "----------------------------------------"
return (returnedSColor, returnedCColor)
#doColorClassification([0,0,0],[0,0,0],1)
| gpl-3.0 | -1,206,405,904,176,180,000 | 28.608696 | 139 | 0.679589 | false |
ewmoore/numpy | setup.py | 1 | 7204 | #!/usr/bin/env python
"""NumPy: array processing for numbers, strings, records, and objects.
NumPy is a general-purpose array-processing package designed to
efficiently manipulate large multi-dimensional arrays of arbitrary
records without sacrificing too much speed for small multi-dimensional
arrays. NumPy is built on the Numeric code base and adds features
introduced by numarray as well as an extended C-API and the ability to
create arrays of arbitrary type which also makes NumPy suitable for
interfacing with general-purpose data-base applications.
There are also basic facilities for discrete fourier transform,
basic linear algebra and random number generation.
"""
DOCLINES = __doc__.split("\n")
import os
import shutil
import sys
import re
import subprocess
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
NAME = 'numpy'
MAINTAINER = "NumPy Developers"
MAINTAINER_EMAIL = "[email protected]"
DESCRIPTION = DOCLINES[0]
LONG_DESCRIPTION = "\n".join(DOCLINES[2:])
URL = "http://www.numpy.org"
DOWNLOAD_URL = "http://sourceforge.net/projects/numpy/files/NumPy/"
LICENSE = 'BSD'
CLASSIFIERS = filter(None, CLASSIFIERS.split('\n'))
AUTHOR = "Travis E. Oliphant et al."
AUTHOR_EMAIL = "[email protected]"
PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"]
MAJOR = 1
MINOR = 8
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
def write_version_py(filename='numpy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing " \
"numpy/version.py and the build directory " \
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('numpy')
config.get_version('numpy/version.py') # sets config.version
return config
def setup_package():
# Perform 2to3 if needed
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
src_path = local_path
if sys.version_info[0] == 3:
src_path = os.path.join(local_path, 'build', 'py3k')
sys.path.insert(0, os.path.join(local_path, 'tools'))
import py3tool
print("Converting to Python3 via 2to3...")
py3tool.sync_2to3('numpy', os.path.join(src_path, 'numpy'))
site_cfg = os.path.join(local_path, 'site.cfg')
if os.path.isfile(site_cfg):
shutil.copy(site_cfg, src_path)
# Ugly hack to make pip work with Python 3, see #1857.
# Explanation: pip messes with __file__ which interacts badly with the
# change in directory due to the 2to3 conversion. Therefore we restore
# __file__ to what it would have been otherwise.
global __file__
__file__ = os.path.join(os.curdir, os.path.basename(__file__))
if '--egg-base' in sys.argv:
# Change pip-egg-info entry to absolute path, so pip can find it
# after changing directory.
idx = sys.argv.index('--egg-base')
if sys.argv[idx + 1] == 'pip-egg-info':
sys.argv[idx + 1] = os.path.join(local_path, 'pip-egg-info')
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# Rewrite the version file everytime
write_version_py()
# Run build
from numpy.distutils.core import setup
try:
setup(
name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
configuration=configuration )
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
| bsd-3-clause | -3,140,936,165,528,526,000 | 32.663551 | 87 | 0.62271 | false |
tbjoern/adventofcode | One/script.py | 1 | 1159 | file = open("input.txt", "r")
input = file.next()
sequence = input.split(", ")
class walker:
def __init__(self):
self.east = 0
self.south = 0
self.facing = 0
self.tiles = {}
def turnL(self):
if self.facing == 0:
self.facing = 3
else:
self.facing -= 1
def turnR(self):
if self.facing == 3:
self.facing = 0
else:
self.facing += 1
def walk(self,dist):
for i in range(0, dist):
if self.facing == 0:
self.south -= 1
elif self.facing == 1:
self.east += 1
elif self.facing == 2:
self.south += 1
else:
self.east -= 1
if self.kek():
return True
self.addTile(self.east,self.south)
return False
def totalDist(self):
return abs(self.east) + abs(self.south)
def addTile(self, x, y):
if x in self.tiles:
self.tiles[x].append(y)
else:
self.tiles[x] = [y]
def kek(self):
if self.east in self.tiles:
if self.south in self.tiles[self.east]:
return True
return False
w = walker()
for s in sequence:
if s[0] == "R":
w.turnR()
else:
w.turnL()
if w.walk(int(s[1:])):
break
print w.totalDist() | mit | 5,358,361,581,108,001,000 | 15.328358 | 42 | 0.559103 | false |
CLVsol/odoo_addons | clv_seedling/batch_history/clv_seedling_batch_history.py | 1 | 2387 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
from openerp.osv import osv
from datetime import *
class clv_seedling_batch_history(osv.Model):
_name = 'clv_seedling.batch_history'
seedling_id = fields.Many2one('clv_seedling', 'Seedling', required=False)
batch_id = fields.Many2one('clv_batch', 'Batch', required=False)
incoming_date = fields.Datetime('Incoming Date', required=False,
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
outgoing_date = fields.Datetime('Outgoing Date', required=False)
notes = fields.Text(string='Notes')
_order = "incoming_date desc"
class clv_seedling(osv.Model):
_inherit = 'clv_seedling'
batch_history_ids = fields.One2many('clv_seedling.batch_history', 'seedling_id', 'Batch History')
class clv_batch(osv.Model):
_inherit = 'clv_batch'
seedling_batch_history_ids = fields.One2many('clv_seedling.batch_history', 'batch_id', 'Seedling Batch History')
| agpl-3.0 | 3,222,787,404,884,915,000 | 53.25 | 116 | 0.514872 | false |
what-studio/profiling | test/test_tracing.py | 1 | 1802 | # -*- coding: utf-8 -*-
import sys
import pytest
from _utils import factorial, find_stats, foo
from profiling.stats import RecordingStatistics
from profiling.tracing import TracingProfiler
def test_setprofile():
profiler = TracingProfiler()
assert sys.getprofile() is None
with profiler:
assert sys.getprofile() == profiler._profile
assert sys.getprofile() is None
sys.setprofile(lambda *x: x)
with pytest.raises(RuntimeError):
profiler.start()
sys.setprofile(None)
def test_profile():
profiler = TracingProfiler()
frame = foo()
profiler._profile(frame, 'call', None)
profiler._profile(frame, 'return', None)
assert len(profiler.stats) == 1
stats1 = find_stats(profiler.stats, 'foo')
stats2 = find_stats(profiler.stats, 'bar')
stats3 = find_stats(profiler.stats, 'baz')
assert stats1.own_hits == 0
assert stats2.own_hits == 0
assert stats3.own_hits == 1
assert stats1.deep_hits == 1
assert stats2.deep_hits == 1
assert stats3.deep_hits == 1
def test_profiler():
profiler = TracingProfiler(base_frame=sys._getframe())
assert isinstance(profiler.stats, RecordingStatistics)
stats, cpu_time, wall_time = profiler.result()
assert len(stats) == 0
with profiler:
factorial(1000)
factorial(10000)
stats1 = find_stats(profiler.stats, 'factorial')
stats2 = find_stats(profiler.stats, '__enter__')
stats3 = find_stats(profiler.stats, '__exit__')
assert stats1.deep_time != 0
assert stats1.deep_time == stats1.own_time
assert stats1.own_time > stats2.own_time
assert stats1.own_time > stats3.own_time
assert stats1.own_hits == 2
assert stats2.own_hits == 0 # entering to __enter__() wasn't profiled.
assert stats3.own_hits == 1
| bsd-3-clause | 2,869,637,923,411,800,600 | 30.614035 | 75 | 0.671476 | false |
AstroHuntsman/POCS | pocs/focuser/birger.py | 1 | 16549 | import io
import re
import serial
import time
import glob
from pocs.focuser.focuser import AbstractFocuser
# Birger adaptor serial numbers should be 5 digits
serial_number_pattern = re.compile('^\d{5}$')
# Error codes should be 'ERR' followed by 1-2 digits
error_pattern = re.compile('(?<=ERR)\d{1,2}')
error_messages = ('No error',
'Unrecognised command',
'Lens is in manual focus mode',
'No lens connected',
'Lens distance stop error',
'Aperture not initialised',
'Invalid baud rate specified',
'Reserved',
'Reserved',
'A bad parameter was supplied to the command',
'XModem timeout',
'XModem error',
'XModem unlock code incorrect',
'Not used',
'Invalid port',
'Licence unlock failure',
'Invalid licence file',
'Invalid library file',
'Reserved',
'Reserved',
'Not used',
'Library not ready for lens communications',
'Library not ready for commands',
'Command not licensed',
'Invalid focus range in memory. Try relearning the range',
'Distance stops not supported by the lens')
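# Example (illustrative): a response line containing 'ERR2' matches
# error_pattern, and error_messages[2] translates it to
# 'Lens is in manual focus mode'.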
class Focuser(AbstractFocuser):
"""
Focuser class for control of a Canon DSLR lens via a Birger Engineering Canon EF-232 adapter
"""
# Class variable to cache the device node scanning results
_birger_nodes = None
# Class variable to store the device nodes already in use. Prevents scanning known Birgers &
# acts as a check against Birgers assigned to incorrect ports.
_assigned_nodes = []
def __init__(self,
name='Birger Focuser',
model='Canon EF-232',
initial_position=None,
dev_node_pattern='/dev/tty.USA49WG*.?',
*args, **kwargs):
super().__init__(name=name, model=model, *args, **kwargs)
self.logger.debug('Initialising Birger focuser')
if serial_number_pattern.match(self.port):
# Have been given a serial number
if self._birger_nodes is None:
# No cached device nodes scanning results, need to scan.
self._birger_nodes = {}
# Find nodes matching pattern
device_nodes = glob.glob(dev_node_pattern)
# Remove nodes already assigned to other Birger objects
device_nodes = [node for node in device_nodes if node not in self._assigned_nodes]
for device_node in device_nodes:
try:
serial_number = self.connect(device_node)
self._birger_nodes[serial_number] = device_node
except (serial.SerialException, serial.SerialTimeoutException, AssertionError):
# No birger on this node.
pass
finally:
self._serial_port.close()
# Search in cached device node scanning results for serial number
try:
device_node = self._birger_nodes[self.port]
except KeyError:
self.logger.critical("Could not find {} ({})!".format(self.name, self.port))
return
self.port = device_node
        # Check that this node hasn't already been assigned to another Birger
if self.port in self._assigned_nodes:
self.logger.critical("Device node {} already in use!".format(self.port))
return
self.connect(self.port)
self._assigned_nodes.append(self.port)
        self._initialise()
if initial_position:
self.position = initial_position
##################################################################################################
# Properties
##################################################################################################
@property
def is_connected(self):
"""
Checks status of serial port to determine if connected.
"""
connected = False
if self._serial_port:
connected = self._serial_port.isOpen()
return connected
@AbstractFocuser.position.getter
def position(self):
"""
Returns current focus position in the lens focus encoder units
"""
response = self._send_command('pf', response_length=1)
return int(response[0].rstrip())
@property
def min_position(self):
"""
Returns position of close limit of focus travel, in encoder units
"""
return self._min_position
@property
def max_position(self):
"""
Returns position of far limit of focus travel, in encoder units
"""
return self._max_position
@property
def lens_info(self):
"""
Return basic lens info (e.g. '400mm,f28' for a 400 mm f/2.8 lens)
"""
return self._lens_info
@property
def library_version(self):
"""
Returns the version string of the Birger adaptor library (firmware).
"""
return self._library_version
@property
def hardware_version(self):
"""
Returns the hardware version of the Birger adaptor
"""
return self._hardware_version
##################################################################################################
# Public Methods
##################################################################################################
def connect(self, port):
try:
# Configure serial port.
# Settings copied from Bob Abraham's birger.c
self._serial_port = serial.Serial()
self._serial_port.port = port
self._serial_port.baudrate = 115200
self._serial_port.bytesize = serial.EIGHTBITS
self._serial_port.parity = serial.PARITY_NONE
self._serial_port.stopbits = serial.STOPBITS_ONE
self._serial_port.timeout = 2.0
self._serial_port.xonxoff = False
self._serial_port.rtscts = False
self._serial_port.dsrdtr = False
self._serial_port.write_timeout = None
self._inter_byte_timeout = None
# Establish connection
self._serial_port.open()
except serial.SerialException as err:
self._serial_port = None
self.logger.critical('Could not open {}!'.format(port))
raise err
time.sleep(2)
# Want to use a io.TextWrapper in order to have a readline() method with universal newlines
# (Birger sends '\r', not '\n'). The line_buffering option causes an automatic flush() when
# a write contains a newline character.
self._serial_io = io.TextIOWrapper(io.BufferedRWPair(self._serial_port, self._serial_port),
newline='\r', encoding='ascii', line_buffering=True)
self.logger.debug('Established serial connection to {} on {}.'.format(self.name, port))
# Set 'verbose' and 'legacy' response modes. The response from this depends on
# what the current mode is... but after a power cycle it should be 'rm1,0', 'OK'
try:
self._send_command('rm1,0', response_length=0)
except AssertionError as err:
self.logger.critical('Error communicating with {} on {}!'.format(self.name, port))
raise err
# Return serial number
        return self._send_command('sn', response_length=1)[0].rstrip()
def move_to(self, position):
"""
Move the focus to a specific position in lens encoder units.
Does not do any checking of the requested position but will warn if the lens reports hitting a stop.
Returns the actual position moved to in lens encoder units.
"""
response = self._send_command('fa{:d}'.format(int(position)), response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,N'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved to {} encoder units".format(r[:-2]))
if r[-1] == '1':
self.logger.warning('{} reported hitting a focus stop'.format(self))
return int(r[:-2])
def move_by(self, increment):
"""
        Move the focus by a given increment in lens encoder units.
Does not do any checking of the requested increment but will warn if the lens reports hitting a stop.
Returns the actual distance moved in lens encoder units.
"""
response = self._send_command('mf{:d}'.format(increment), response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,N'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved by {} encoder units".format(r[:-2]))
if r[-1] == '1':
self.logger.warning('{} reported hitting a focus stop'.format(self))
return int(r[:-2])
##################################################################################################
# Private Methods
##################################################################################################
def _send_command(self, command, response_length=None, ignore_response=False):
"""
Sends a command to the Birger adaptor and retrieves the response.
Args:
command (string): command string to send (without newline), e.g. 'fa1000', 'pf'
response length (integer, optional, default=None): number of lines of response expected.
For most commands this should be 0 or 1. If None readlines() will be called to
capture all responses. As this will block until the timeout expires it should only
be used if the number of lines expected is not known (e.g. 'ds' command).
Returns:
list: possibly empty list containing the '\r' terminated lines of the response from the adaptor.
"""
if not self.is_connected:
self.logger.critical("Attempt to send command to {} when not connected!".format(self))
return
# Clear the input buffer in case there's anything left over in there.
self._serial_port.reset_input_buffer()
# Send command
self._serial_io.write(command + '\r')
if ignore_response:
return
# In verbose mode adaptor will first echo the command
echo = self._serial_io.readline().rstrip()
assert echo == command, self.logger.warning("echo != command: {} != {}".format(echo, command))
# Adaptor should then send 'OK', even if there was an error.
ok = self._serial_io.readline().rstrip()
assert ok == 'OK'
# Depending on which command was sent there may or may not be any further
# response.
response = []
if response_length == 0:
# Not expecting any further response. Should check the buffer anyway in case an error
# message has been sent.
if self._serial_port.in_waiting:
response.append(self._serial_io.readline())
        elif response_length is not None:
            # Expecting some number of lines of response. Attempt to read that many lines.
            for i in range(response_length):
                response.append(self._serial_io.readline())
        else:
            # Don't know what to expect. Call readlines() to get whatever is there.
            response.extend(self._serial_io.readlines())
# Check for an error message in response
if response:
# Not an empty list.
error_match = error_pattern.match(response[0])
if error_match:
# Got an error message! Translate it.
try:
error_message = error_messages[int(error_match.group())]
self.logger.error("{} returned error message '{}'!".format(self, error_message))
except Exception:
self.logger.error("Unknown error '{}' from {}!".format(error_match.group(), self))
return response
def _initialise(self):
# Get serial number. Note, this is the serial number of the Birger adaptor,
# *not* the attached lens (which would be more useful). Accessible as self.uid
self._get_serial_number()
# Get the version string of the adaptor software libray. Accessible as self.library_version
self._get_library_version()
# Get the hardware version of the adaptor. Accessible as self.hardware_version
self._get_hardware_version()
# Get basic lens info (e.g. '400mm,f28' for a 400 mm, f/2.8 lens). Accessible as self.lens_info
self._get_lens_info()
# Initialise the aperture motor. This also has the side effect of fully opening the iris.
self._initialise_aperture()
        # Initialise focus. First move the focus to the close stop.
self._move_zero()
# Then reset the focus encoder counts to 0
self._zero_encoder()
self._min_position = 0
# Calibrate the focus with the 'Learn Absolute Focus Range' command
self._learn_focus_range()
# Finally move the focus to the far stop (close to where we'll want it) and record position
self._max_position = self._move_inf()
self.logger.info('\t\t\t {} initialised'.format(self))
def _get_serial_number(self):
response = self._send_command('sn', response_length=1)
self._serial_number = response[0].rstrip()
self.logger.debug("Got serial number {} for {} on {}".format(self.uid, self.name, self.port))
def _get_library_version(self):
response = self._send_command('lv', response_length=1)
self._library_version = response[0].rstrip()
self.logger.debug("Got library version '{}' for {} on {}".format(self.library_version, self.name, self.port))
def _get_hardware_version(self):
response = self._send_command('hv', response_length=1)
self._hardware_version = response[0].rstrip()
self.logger.debug("Got hardware version {} for {} on {}".format(self.hardware_version, self.name, self.port))
def _get_lens_info(self):
response = self._send_command('id', response_length=1)
self._lens_info = response[0].rstrip()
self.logger.debug("Got lens info '{}' for {} on {}".format(self.lens_info, self.name, self.port))
def _initialise_aperture(self):
self.logger.debug('Initialising aperture motor')
response = self._send_command('in', response_length=1)
if response[0].rstrip() != 'DONE':
self.logger.error("{} got response '{}', expected 'DONE'!".format(self, response[0].rstrip()))
def _move_zero(self):
response = self._send_command('mz', response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,1'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved {} encoder units to close stop".format(r[:-2]))
return int(r[:-2])
def _zero_encoder(self):
self.logger.debug('Setting focus encoder zero point')
self._send_command('sf0', response_length=0)
def _learn_focus_range(self):
self.logger.debug('Learning absolute focus range')
response = self._send_command('la', response_length=1)
if response[0].rstrip() != 'DONE:LA':
self.logger.error("{} got response '{}', expected 'DONE:LA'!".format(self, response[0].rstrip()))
def _move_inf(self):
response = self._send_command('mi', response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,1'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved {} encoder units to far stop".format(r[:-2]))
return int(r[:-2])
| mit | 46,177,568,672,669,770 | 40.580402 | 117 | 0.563236 | false |
queria/my-tempest | tempest/api/compute/servers/test_list_server_filters.py | 1 | 13183 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.api import utils
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
cls.set_network_resources(network=True, subnet=True, dhcp=True)
super(ListServerFiltersTestJSON, cls).resource_setup()
cls.client = cls.servers_client
# Check to see if the alternate image ref actually exists...
images_client = cls.images_client
resp, images = images_client.list_images()
if cls.image_ref != cls.image_ref_alt and \
any([image for image in images
if image['id'] == cls.image_ref_alt]):
cls.multiple_images = True
else:
cls.image_ref_alt = cls.image_ref
# Do some sanity checks here. If one of the images does
# not exist, fail early since the tests won't work...
try:
cls.images_client.get_image(cls.image_ref)
except exceptions.NotFound:
raise RuntimeError("Image %s (image_ref) was not found!" %
cls.image_ref)
try:
cls.images_client.get_image(cls.image_ref_alt)
except exceptions.NotFound:
raise RuntimeError("Image %s (image_ref_alt) was not found!" %
cls.image_ref_alt)
cls.s1_name = data_utils.rand_name(cls.__name__ + '-instance')
resp, cls.s1 = cls.create_test_server(name=cls.s1_name,
wait_until='ACTIVE')
cls.s2_name = data_utils.rand_name(cls.__name__ + '-instance')
resp, cls.s2 = cls.create_test_server(name=cls.s2_name,
image_id=cls.image_ref_alt,
wait_until='ACTIVE')
cls.s3_name = data_utils.rand_name(cls.__name__ + '-instance')
resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
if (CONF.service_available.neutron and
CONF.compute.allow_tenant_isolation):
network = cls.isolated_creds.get_primary_network()
cls.fixed_network_name = network['name']
else:
cls.fixed_network_name = CONF.compute.fixed_network_name
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
def test_list_servers_filter_by_image(self):
# Filter the list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_flavor(self):
# Filter the list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_server_name(self):
# Filter the list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_server_status(self):
# Filter the list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_shutoff_status(self):
# Filter the list of servers by server shutoff status
params = {'status': 'shutoff'}
self.client.stop(self.s1['id'])
self.client.wait_for_server_status(self.s1['id'],
'SHUTOFF')
resp, body = self.client.list_servers(params)
self.client.start(self.s1['id'])
self.client.wait_for_server_status(self.s1['id'],
'ACTIVE')
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 1}
resp, servers = self.client.list_servers(params)
# when _interface='xml', one element for servers_links in servers
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@test.attr(type='gate')
def test_list_servers_filter_by_zero_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 0}
resp, servers = self.client.list_servers(params)
self.assertEqual(0, len(servers['servers']))
@test.attr(type='gate')
def test_list_servers_filter_by_exceed_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 100000}
resp, servers = self.client.list_servers(params)
resp, all_servers = self.client.list_servers()
self.assertEqual(len([x for x in all_servers['servers'] if 'id' in x]),
len([x for x in servers['servers'] if 'id' in x]))
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_image(self):
# Filter the detailed list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_flavor(self):
# Filter the detailed list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_server_name(self):
# Filter the detailed list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_server_status(self):
# Filter the detailed list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
if x['id'] in test_ids])
@test.attr(type='gate')
def test_list_servers_filtered_by_name_wildcard(self):
# List all servers that contains '-instance' in name
params = {'name': '-instance'}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Let's take random part of name and try to search it
part_name = self.s1_name[6:-1]
params = {'name': part_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_filtered_by_name_regex(self):
# list of regex that should match s1, s2 and s3
regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$']
for regex in regexes:
params = {'name': regex}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Let's take random part of name and try to search it
part_name = self.s1_name[-10:]
params = {'name': part_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
# Here should be listed 1 server
resp, self.s1 = self.client.get_server(self.s1['id'])
ip = self.s1['addresses'][self.fixed_network_name][0]['addr']
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.skip_because(bug="1182883",
condition=CONF.service_available.neutron)
@test.attr(type='gate')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
# List all servers filtered by part of ip address.
# Here should be listed all servers
resp, self.s1 = self.client.get_server(self.s1['id'])
ip = self.s1['addresses'][self.fixed_network_name][0]['addr'][0:-3]
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_limit_results(self):
# Verify only the expected number of detailed results are returned
params = {'limit': 1}
resp, servers = self.client.list_servers_with_detail(params)
self.assertEqual(1, len(servers['servers']))
class ListServerFiltersTestXML(ListServerFiltersTestJSON):
_interface = 'xml'
| apache-2.0 | -4,528,039,094,660,842,500 | 43.090301 | 79 | 0.602974 | false |
linuxmidhun/0install | zeroinstall/cmd/remove_feed.py | 1 | 1066 | """
The B{0install remove-feed} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
syntax = "[INTERFACE] FEED"
from zeroinstall import SafeException, _
from zeroinstall.injector import model, writer
from zeroinstall.cmd import add_feed, UsageError
add_options = add_feed.add_options
def handle(config, options, args):
"""@type args: [str]"""
if len(args) == 2:
iface = config.iface_cache.get_interface(model.canonical_iface_uri(args[0]))
try:
feed_url = model.canonical_iface_uri(args[1])
except SafeException:
feed_url = args[1] # File might not exist any longer
feed_import = add_feed.find_feed_import(iface, feed_url)
if not feed_import:
raise SafeException(_('Interface %(interface)s has no feed %(feed)s') %
{'interface': iface.uri, 'feed': feed_url})
iface.extra_feeds.remove(feed_import)
writer.save_interface(iface)
elif len(args) == 1:
add_feed.handle(config, options, args, add_ok = False, remove_ok = True)
else:
raise UsageError()
| lgpl-2.1 | 7,947,182,784,579,347,000 | 30.352941 | 78 | 0.707317 | false |
catapult-project/catapult-csm | telemetry/telemetry/internal/browser/browser_unittest.py | 1 | 11913 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import tempfile
import unittest
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.browser import browser as browser_module
from telemetry.internal.browser import browser_finder
from telemetry.internal.platform import gpu_device
from telemetry.internal.platform import gpu_info
from telemetry.internal.platform import system_info
from telemetry.internal.util import path
from telemetry.testing import browser_test_case
from telemetry.testing import options_for_unittests
from telemetry.timeline import tracing_config
from devil.android import app_ui
import mock
import py_utils
class IntentionalException(Exception):
pass
class BrowserTest(browser_test_case.BrowserTestCase):
def testBrowserCreation(self):
self.assertEquals(1, len(self._browser.tabs))
# Different browsers boot up to different things.
assert self._browser.tabs[0].url
@decorators.Enabled('has tabs')
def testNewCloseTab(self):
existing_tab = self._browser.tabs[0]
self.assertEquals(1, len(self._browser.tabs))
existing_tab_url = existing_tab.url
new_tab = self._browser.tabs.New()
self.assertEquals(2, len(self._browser.tabs))
self.assertEquals(existing_tab.url, existing_tab_url)
self.assertEquals(new_tab.url, 'about:blank')
new_tab.Close()
self.assertEquals(1, len(self._browser.tabs))
self.assertEquals(existing_tab.url, existing_tab_url)
def testMultipleTabCalls(self):
self._browser.tabs[0].Navigate(self.UrlOfUnittestFile('blank.html'))
self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
def testTabCallByReference(self):
tab = self._browser.tabs[0]
tab.Navigate(self.UrlOfUnittestFile('blank.html'))
self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
@decorators.Enabled('has tabs')
def testCloseReferencedTab(self):
self._browser.tabs.New()
tab = self._browser.tabs[0]
tab.Navigate(self.UrlOfUnittestFile('blank.html'))
tab.Close()
self.assertEquals(1, len(self._browser.tabs))
@decorators.Enabled('has tabs')
def testForegroundTab(self):
# Should be only one tab at this stage, so that must be the foreground tab
original_tab = self._browser.tabs[0]
self.assertEqual(self._browser.foreground_tab, original_tab)
new_tab = self._browser.tabs.New()
    # New tab should be the foreground tab
self.assertEqual(self._browser.foreground_tab, new_tab)
# Make sure that activating the background tab makes it the foreground tab
original_tab.Activate()
self.assertEqual(self._browser.foreground_tab, original_tab)
# Closing the current foreground tab should switch the foreground tab to the
# other tab
original_tab.Close()
self.assertEqual(self._browser.foreground_tab, new_tab)
# This test uses the reference browser and doesn't have access to
# helper binaries like crashpad_database_util.
@decorators.Enabled('linux')
def testGetMinidumpPathOnCrash(self):
tab = self._browser.tabs[0]
with self.assertRaises(exceptions.AppCrashException):
tab.Navigate('chrome://crash', timeout=5)
crash_minidump_path = self._browser.GetMostRecentMinidumpPath()
self.assertIsNotNone(crash_minidump_path)
def testGetSystemInfo(self):
if not self._browser.supports_system_info:
logging.warning(
'Browser does not support getting system info, skipping test.')
return
info = self._browser.GetSystemInfo()
self.assertTrue(isinstance(info, system_info.SystemInfo))
self.assertTrue(hasattr(info, 'model_name'))
self.assertTrue(hasattr(info, 'gpu'))
self.assertTrue(isinstance(info.gpu, gpu_info.GPUInfo))
self.assertTrue(hasattr(info.gpu, 'devices'))
self.assertTrue(len(info.gpu.devices) > 0)
for g in info.gpu.devices:
self.assertTrue(isinstance(g, gpu_device.GPUDevice))
def testGetSystemInfoNotCachedObject(self):
if not self._browser.supports_system_info:
logging.warning(
'Browser does not support getting system info, skipping test.')
return
info_a = self._browser.GetSystemInfo()
info_b = self._browser.GetSystemInfo()
self.assertFalse(info_a is info_b)
def testGetSystemTotalMemory(self):
self.assertTrue(self._browser.memory_stats['SystemTotalPhysicalMemory'] > 0)
def testSystemInfoModelNameOnMac(self):
if self._browser.platform.GetOSName() != 'mac':
self.skipTest('This test is only run on macOS')
return
if not self._browser.supports_system_info:
logging.warning(
'Browser does not support getting system info, skipping test.')
return
info = self._browser.GetSystemInfo()
model_name_re = r"[a-zA-Z]* [0-9.]*"
self.assertNotEqual(re.match(model_name_re, info.model_name), None)
# crbug.com/628836 (CrOS, where system-guest indicates ChromeOS guest)
# github.com/catapult-project/catapult/issues/3130 (Windows)
@decorators.Disabled('cros-chrome-guest', 'system-guest', 'chromeos', 'win')
def testIsTracingRunning(self):
tracing_controller = self._browser.platform.tracing_controller
if not tracing_controller.IsChromeTracingSupported():
return
self.assertFalse(tracing_controller.is_tracing_running)
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
tracing_controller.StartTracing(config)
self.assertTrue(tracing_controller.is_tracing_running)
tracing_controller.StopTracing()
self.assertFalse(tracing_controller.is_tracing_running)
@decorators.Enabled('android')
def testGetAppUi(self):
self.assertTrue(self._browser.supports_app_ui_interactions)
ui = self._browser.GetAppUi()
self.assertTrue(isinstance(ui, app_ui.AppUi))
self.assertIsNotNone(ui.WaitForUiNode(resource_id='action_bar_root'))
class CommandLineBrowserTest(browser_test_case.BrowserTestCase):
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs('--user-agent=telemetry')
def testCommandLineOverriding(self):
# This test starts the browser with --user-agent=telemetry. This tests
# whether the user agent is then set.
t = self._browser.tabs[0]
t.Navigate(self.UrlOfUnittestFile('blank.html'))
t.WaitForDocumentReadyStateToBeInteractiveOrBetter()
self.assertEquals(t.EvaluateJavaScript('navigator.userAgent'),
'telemetry')
class DirtyProfileBrowserTest(browser_test_case.BrowserTestCase):
@classmethod
def CustomizeBrowserOptions(cls, options):
options.profile_type = 'small_profile'
@decorators.Disabled('chromeos') # crbug.com/243912
def testDirtyProfileCreation(self):
self.assertEquals(1, len(self._browser.tabs))
class BrowserLoggingTest(browser_test_case.BrowserTestCase):
@classmethod
def CustomizeBrowserOptions(cls, options):
options.logging_verbosity = options.VERBOSE_LOGGING
@decorators.Disabled('chromeos', 'android')
def testLogFileExist(self):
self.assertTrue(
os.path.isfile(self._browser._browser_backend.log_file_path))
def _GenerateBrowserProfile(number_of_tabs):
""" Generate a browser profile which browser had |number_of_tabs| number of
tabs opened before it was closed.
Returns:
profile_dir: the directory of profile.
"""
profile_dir = tempfile.mkdtemp()
options = options_for_unittests.GetCopy()
options.browser_options.output_profile_path = profile_dir
browser_to_create = browser_finder.FindBrowser(options)
browser_to_create.platform.network_controller.InitializeIfNeeded()
try:
with browser_to_create.Create(options) as browser:
browser.platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
blank_file_path = os.path.join(path.GetUnittestDataDir(), 'blank.html')
blank_url = browser.platform.http_server.UrlOf(blank_file_path)
browser.foreground_tab.Navigate(blank_url)
browser.foreground_tab.WaitForDocumentReadyStateToBeComplete()
for _ in xrange(number_of_tabs - 1):
tab = browser.tabs.New()
tab.Navigate(blank_url)
tab.WaitForDocumentReadyStateToBeComplete()
return profile_dir
finally:
browser_to_create.platform.network_controller.Close()
class BrowserCreationTest(unittest.TestCase):
def setUp(self):
self.mock_browser_backend = mock.MagicMock()
self.mock_platform_backend = mock.MagicMock()
def testCleanedUpCalledWhenExceptionRaisedInBrowserCreation(self):
self.mock_platform_backend.platform.FlushDnsCache.side_effect = (
IntentionalException('Boom!'))
with self.assertRaises(IntentionalException):
browser_module.Browser(
self.mock_browser_backend, self.mock_platform_backend,
credentials_path=None)
self.assertTrue(self.mock_platform_backend.WillCloseBrowser.called)
def testOriginalExceptionNotSwallow(self):
self.mock_platform_backend.platform.FlushDnsCache.side_effect = (
IntentionalException('Boom!'))
self.mock_platform_backend.WillCloseBrowser.side_effect = (
IntentionalException('Cannot close browser!'))
with self.assertRaises(IntentionalException) as context:
browser_module.Browser(
self.mock_browser_backend, self.mock_platform_backend,
credentials_path=None)
self.assertIn('Boom!', context.exception.message)
class BrowserRestoreSessionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._number_of_tabs = 4
cls._profile_dir = _GenerateBrowserProfile(cls._number_of_tabs)
cls._options = options_for_unittests.GetCopy()
cls._options.browser_options.AppendExtraBrowserArgs(
['--restore-last-session'])
cls._options.browser_options.profile_dir = cls._profile_dir
cls._browser_to_create = browser_finder.FindBrowser(cls._options)
cls._browser_to_create.platform.network_controller.InitializeIfNeeded()
@decorators.Enabled('has tabs')
@decorators.Disabled('chromeos', 'win', 'mac')
  # TODO(nednguyen): Enable this test on the Windows platform
def testRestoreBrowserWithMultipleTabs(self):
with self._browser_to_create.Create(self._options) as browser:
# The number of tabs will be self._number_of_tabs + 1 as it includes the
# old tabs and a new blank tab.
expected_number_of_tabs = self._number_of_tabs + 1
try:
py_utils.WaitFor(
lambda: len(browser.tabs) == expected_number_of_tabs, 10)
except:
logging.error('Number of tabs is %s' % len(browser.tabs))
raise
self.assertEquals(expected_number_of_tabs, len(browser.tabs))
@classmethod
def tearDownClass(cls):
cls._browser_to_create.platform.network_controller.Close()
shutil.rmtree(cls._profile_dir)
class TestBrowserOperationDoNotLeakTempFiles(unittest.TestCase):
@decorators.Enabled('win', 'linux')
# TODO(ashleymarie): Re-enable on mac
# BUG=catapult:#3523
@decorators.Isolated
def testBrowserNotLeakingTempFiles(self):
options = options_for_unittests.GetCopy()
browser_to_create = browser_finder.FindBrowser(options)
self.assertIsNotNone(browser_to_create)
before_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)
browser_to_create.platform.network_controller.InitializeIfNeeded()
try:
with browser_to_create.Create(options) as browser:
tab = browser.tabs.New()
tab.Navigate('about:blank')
self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))
after_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)
self.assertEqual(before_browser_run_temp_dir_content,
after_browser_run_temp_dir_content)
finally:
browser_to_create.platform.network_controller.Close()
| bsd-3-clause | -8,673,220,361,794,257,000 | 37.553398 | 80 | 0.732645 | false |
helixyte/TheLMA | thelma/repositories/rdb/mappers/experimentjob.py | 1 | 1080 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Experiment job mapper.
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import mapper
from thelma.entities.experiment import Experiment
from thelma.entities.job import ExperimentJob
from thelma.entities.job import JOB_TYPES
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(job_mapper, job_tbl, experiment_tbl):
"Mapper factory."
m = mapper(ExperimentJob, job_tbl,
inherits=job_mapper,
properties=dict(
experiments=relationship(Experiment,
order_by=experiment_tbl.c.experiment_id,
back_populates='job',
cascade='save-update, merge, delete'
)
),
polymorphic_identity=JOB_TYPES.EXPERIMENT
)
return m
| mit | -7,348,472,988,197,110,000 | 32.75 | 80 | 0.607407 | false |
SeanEstey/Bravo | app/notify/tasks.py | 1 | 7873 | '''app.notify.tasks'''
import json, os, pytz
from os import environ as env
from datetime import datetime, date, time, timedelta
from dateutil.parser import parse
from bson import ObjectId as oid
from flask import g, render_template
from app import get_keys, celery #, smart_emit
from app.lib.dt import to_local
from app.lib import mailgun
from app.main import schedule
from app.main.parser import is_bus
from app.main.etapestry import call, EtapError
from . import email, events, sms, voice, pickups, triggers
from logging import getLogger
log = getLogger(__name__)
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def monitor_triggers(self, **kwargs):
ready = g.db.triggers.find({
'status':'pending',
'fire_dt':{
'$lt':datetime.utcnow()}})
for trigger in ready:
evnt = g.db.events.find_one({'_id':trigger['evnt_id']})
g.group = evnt['agency']
log.debug('Firing event trigger for %s', evnt['name'], extra={'trigger_id':str(trigger['_id'])})
try:
fire_trigger(trigger['_id'])
except Exception as e:
log.exception('Error firing event trigger for %s', evnt['name'])
pending = g.db.triggers.find({
'status':'pending',
'fire_dt': {
'$gt':datetime.utcnow()}}).sort('fire_dt', 1)
output = []
if pending.count() > 0:
tgr = pending.next()
delta = tgr['fire_dt'] - datetime.utcnow().replace(tzinfo=pytz.utc)
to_str = str(delta)[:-7]
return 'next trigger pending in %s' % to_str
else:
return '0 pending'
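# Note (illustrative, not from this repo): monitor_triggers only makes sense when
# invoked periodically, so that due triggers fire shortly after their fire_dt
# passes. A typical wiring would register it with celery beat; the entry name and
# interval below are hypothetical:
#
#   CELERYBEAT_SCHEDULE = {
#       'monitor-triggers-every-minute': {
#           'task': 'app.notify.tasks.monitor_triggers',
#           'schedule': timedelta(minutes=1)}}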
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def fire_trigger(self, _id=None, **rest):
    '''Sends out all dependent sms/voice/email notification messages
'''
status = ''
n_errors = 0
trig = g.db.triggers.find_one({'_id':oid(_id)})
event = g.db.events.find_one({'_id':trig['evnt_id']})
g.group = event['agency']
g.db.triggers.update_one(
{'_id':oid(_id)},
{'$set': {'task_id':self.request.id, 'status':'in-progress'}})
events.update_status(trig['evnt_id'])
ready = g.db.notifics.find(
{'trig_id':oid(_id), 'tracking.status':'pending'})
count = ready.count()
log.info('Sending notifications for event %s...', event['name'],
extra={'type':trig['type'], 'n_total':count})
#smart_emit('trigger_status',{
# 'trig_id': str(_id), 'status': 'in-progress'})
if env['BRV_SANDBOX'] == 'True':
log.info('sandbox: simulating voice/sms, rerouting emails')
for n in ready:
try:
if n['type'] == 'voice':
status = voice.call(n, get_keys('twilio'))
elif n['type'] == 'sms':
status = sms.send(n, get_keys('twilio'))
elif n['type'] == 'email':
status = email.send(n, get_keys('mailgun'))
except Exception as e:
n_errors +=1
status = 'error'
log.exception('Error sending notification to %s', n['to'],
extra={'type':n['type']})
else:
if status == 'failed':
n_errors += 1
finally:
pass
#smart_emit('notific_status', {
# 'notific_id':str(n['_id']), 'status':status})
g.db.triggers.update_one({'_id':oid(_id)}, {'$set': {'status': 'fired'}})
'''smart_emit('trigger_status', {
'trig_id': str(_id),
'status': 'fired',
'sent': count - n_errors,
'errors': n_errors})'''
log.info('%s/%s notifications sent for event %s', count - n_errors, count, event['name'],
extra={'type':trig['type'], 'n_total':count, 'n_errors':n_errors})
return 'success'
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def schedule_reminders(self, group=None, for_date=None, **rest):
if for_date:
for_date = parse(for_date).date()
groups = [g.db['groups'].find_one({'name':group})] if group else g.db['groups'].find()
evnt_ids = []
for group_ in groups:
n_success = n_fails = 0
g.group = group_['name']
log.info('Scheduling notification events...')
days_ahead = int(group_['notify']['sched_delta_days'])
on_date = date.today() + timedelta(days=days_ahead) if not for_date else for_date
date_str = on_date.strftime('%m-%d-%Y')
blocks = []
for key in group_['cal_ids']:
blocks += schedule.get_blocks(
group_['cal_ids'][key],
datetime.combine(on_date,time(8,0)),
datetime.combine(on_date,time(9,0)),
get_keys('google')['oauth'])
if len(blocks) == 0:
log.debug('no blocks on %s', date_str)
continue
else:
log.debug('%s events on %s: %s',
len(blocks), date_str, ", ".join(blocks))
for block in blocks:
if is_bus(block) and group_['notify']['sched_business'] == False:
continue
try:
evnt_id = pickups.create_reminder(g.group, block, on_date)
except EtapError as e:
n_fails +=1
log.exception('Error creating notification event %s', block)
continue
else:
n_success +=1
evnt_ids.append(str(evnt_id))
log.info('Created notification event %s', block)
log.info('Created %s/%s scheduled notification events',
n_success, n_success + n_fails)
return json.dumps(evnt_ids)
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def skip_pickup(self, evnt_id=None, acct_id=None, **rest):
    '''User has opted out of a pickup via sms/voice/email notification.
Run is_valid() before calling this function.
@acct_id: _id from db.accounts, not eTap account id
'''
# Cancel any pending parent notifications
result = g.db.notifics.update_many(
{'acct_id':oid(acct_id), 'evnt_id':oid(evnt_id), 'tracking.status':'pending'},
{'$set':{'tracking.status':'cancelled'}})
acct = g.db.accounts.find_one_and_update(
{'_id':oid(acct_id)},
{'$set': {'opted_out': True}})
evnt = g.db.events.find_one({'_id':oid(evnt_id)})
if not evnt or not acct:
msg = 'evnt/acct not found (evnt_id=%s, acct_id=%s' %(evnt_id,acct_id)
log.error(msg)
raise Exception(msg)
g.group = evnt['agency']
log.info('%s opted out of pickup',
acct.get('name') or acct.get('email'),
extra={'event_name':evnt['name'], 'account_id':acct['udf']['etap_id']})
try:
call('skip_pickup', data={
'acct_id': acct['udf']['etap_id'],
'date': acct['udf']['pickup_dt'].strftime('%d/%m/%Y'),
'next_pickup': to_local(
acct['udf']['future_pickup_dt'],
to_str='%d/%m/%Y')})
except Exception as e:
log.exception('Error calling skip_pickup')
log.exception("Error updating account %s",
acct.get('name') or acct.get('email'),
extra={'account_id': acct['udf']['etap_id']})
if not acct.get('email'):
return 'success'
try:
body = render_template(
'email/%s/no_pickup.html' % g.group,
to=acct['email'],
account=to_local(obj=acct, to_str='%B %d %Y'))
except Exception as e:
log.exception('Error rendering no_pickup template')
raise
else:
mailgun.send(
acct['email'],
'Thanks for Opting Out',
body,
get_keys('mailgun'),
v={'type':'opt_out', 'group':g.group})
return 'success'
| gpl-2.0 | -6,240,724,140,937,675,000 | 32.7897 | 104 | 0.526737 | false |
tuxofil/Gps2Udp | misc/server/gps2udp.py | 1 | 5891 | #!/usr/bin/env python
"""
Receive geolocation data from the Gps2Udp Android application
via UDP/IP and forward it to stdout line by line.
There are some requirements for a valid incoming packet:
- it must be of form: TIMESTAMP LATITUDE LONGITUDE ACCURACY [other fields];
- TIMESTAMP is a Unix timestamp (seconds since 1 Jan 1970);
- the diff between TIMESTAMP and local time must be less
than MAX_TIME_DIFF (definition of the MAX_TIME_DIFF variable see below);
- TIMESTAMP must be greater than timestamp of a previous valid packet;
- LATITUDE is a float between [-90.0..90.0];
- LONGITUDE is a float between [-180.0..180.0];
- ACCURACY is an integer between [0..MAX_ACCURACY] (definition of
MAX_ACCURACY variable see below).
If any of the requirements are not met, the packet will be silently ignored.
When started with the --signed command line option, an extra field must
be present in each incoming UDP packet - DIGEST. With this field the
packet format must be of the form:
  TIMESTAMP LATITUDE LONGITUDE ACCURACY DIGEST
DIGEST is a SHA1 of "TIMESTAMP LATITUDE LONGITUDE ACCURACY" plus a secret
string known only by the Gps2Udp client (Android app) and the server. The
server reads the secret from the GPS2UDP_SECRET environment variable.
Important notes when in --signed mode:
- any packet without the digest will be ignored;
- any packet whose digest does not match the digest calculated on the
  server side will be ignored;
- if the secret is not defined (GPS2UDP_SECRET environment variable is not
set or empty), no packets will be matched as valid.
"""
import getopt
import hashlib
import os
import os.path
import socket
import sys
import time
DEFAULT_PORT = 5000
# Maximum time difference between a timestamp in a packet and
# the local Unix timestamp (in seconds).
MAX_TIME_DIFF = 60 * 5
# Maximum valid accuracy value (in meters).
MAX_ACCURACY = 10000 # 10km
# Here will be stored the timestamp of the last valid packet received.
# The timestamp will be used later to avoid receiving data from the past.
LAST_TIMESTAMP = None
def usage(exitcode = 1):
"""
Show usage info and exit.
"""
argv0 = os.path.basename(sys.argv[0])
print 'Usage: {0} [options]'.format(argv0)
print ' Options:'
print ' --signed check every UDP packet for digital signature;'
print ' --port=N UDP port number to listen. Default is 5000.'
sys.exit(exitcode)
def main():
"""
Entry point.
"""
try:
cmd_opts, _cmd_args = getopt.getopt(
sys.argv[1:], '', ['port=', 'signed'])
except getopt.GetoptError as exc:
sys.stderr.write('Error: ' + str(exc) + '\n')
usage()
cmd_opts = dict(cmd_opts)
port = int(cmd_opts.get('--port', str(DEFAULT_PORT)))
signed = '--signed' in cmd_opts
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', port))
while True:
data, _addr = sock.recvfrom(100)
try:
result = parse_packet(data, signed)
except PacketParseError:
continue
sys.stdout.write(format_packet(result))
sys.stdout.flush()
class PacketParseError(Exception):
"""Bad packet received."""
pass
def parse_packet(data, signed = False):
"""
Parse and check incoming packet.
The packet must be of form:
TIMESTAMP LATITUDE LONGITUDE ACCURACY
:param data: packet body
:type data: string
:param signed: if True, the packet will be checked for a
valid digital signature
:type signed: boolean
:rtype: dict
"""
global LAST_TIMESTAMP
result = {}
tokens = [elem for elem in data.strip().split(' ') if elem]
if signed:
# check the signature
if len(tokens) < 5:
raise PacketParseError
payload = ' '.join(tokens[:4])
digest = tokens[4]
secret = os.environ.get('GPS2UDP_SECRET')
if secret is None or len(secret) == 0:
# secret is not defined => unable to check
raise PacketParseError
hasher = hashlib.sha1()
hasher.update(payload + secret)
if hasher.hexdigest() != digest:
# digital signature mismatch
raise PacketParseError
else:
# check tokens count
if len(tokens) < 4:
raise PacketParseError
# parse the tokens
try:
result['timestamp'] = int(tokens[0])
result['latitude'] = float(tokens[1])
result['longitude'] = float(tokens[2])
result['accuracy'] = int(tokens[3])
except ValueError:
raise PacketParseError
# check timestamp
time_diff = abs(result['timestamp'] - int(time.time()))
if time_diff > MAX_TIME_DIFF:
# the timestamp differs from NOW for more than 5 minutes
raise PacketParseError
if LAST_TIMESTAMP is not None:
if result['timestamp'] <= LAST_TIMESTAMP:
# the timestamp is not greater than the previous timestamp
raise PacketParseError
# check lat&long values
if not (-90.0 <= result['latitude'] <= 90.0):
raise PacketParseError
if not (-180.0 <= result['longitude'] <= 180.0):
raise PacketParseError
# check accuracy value
if result['accuracy'] < 0 or result['accuracy'] > MAX_ACCURACY:
raise PacketParseError
    # All checks passed => packet is valid.
# Save the timestamp in global var:
LAST_TIMESTAMP = result['timestamp']
return result
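# Illustrative helper (not part of the original script): shows how a client could
# build a packet accepted by parse_packet() in --signed mode, using the same
# SHA1-of-payload-plus-secret scheme checked above. The argument names and number
# formatting here are assumptions, not the Android app's actual code.
def _example_signed_packet(timestamp, latitude, longitude, accuracy, secret):
    """Return 'TIMESTAMP LATITUDE LONGITUDE ACCURACY DIGEST' as a string."""
    payload = '%d %.7f %.7f %d' % (timestamp, latitude, longitude, accuracy)
    digest = hashlib.sha1(payload + secret).hexdigest()
    return payload + ' ' + digest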
def format_packet(data):
"""
Format received packet for the stdout.
:param data: packet data
:type data: dict
:rtype: string
"""
return (str(data['timestamp']) + ' ' +
format(data['latitude'], '.7f') + ' ' +
format(data['longitude'], '.7f') + ' ' +
str(data['accuracy']) + '\n')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)
| bsd-2-clause | -6,502,868,409,911,242,000 | 31.016304 | 76 | 0.647938 | false |
Puppet-Finland/trac | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | 1 | 4322 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Dirk Stöcker <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
import hashlib
import random
import urllib2
from trac.config import Option
from trac.core import Component, implements
from trac.util.html import tag
from tracspamfilter.api import user_agent
from tracspamfilter.captcha import ICaptchaMethod
class KeycaptchaCaptcha(Component):
"""KeyCaptcha implementation"""
implements(ICaptchaMethod)
private_key = Option('spam-filter', 'captcha_keycaptcha_private_key', '',
"""Private key for KeyCaptcha usage.""", doc_domain="tracspamfilter")
user_id = Option('spam-filter', 'captcha_keycaptcha_user_id', '',
"""User id for KeyCaptcha usage.""", doc_domain="tracspamfilter")
def generate_captcha(self, req):
session_id = "%d-3.4.0.001" % random.randint(1, 10000000)
sign1 = hashlib.md5(session_id + req.remote_addr +
self.private_key).hexdigest()
sign2 = hashlib.md5(session_id + self.private_key).hexdigest()
varblock = "var s_s_c_user_id = '%s';\n" % self.user_id
varblock += "var s_s_c_session_id = '%s';\n" % session_id
varblock += "var s_s_c_captcha_field_id = 'keycaptcha_response_field';\n"
varblock += "var s_s_c_submit_button_id = 'keycaptcha_response_button';\n"
varblock += "var s_s_c_web_server_sign = '%s';\n" % sign1
varblock += "var s_s_c_web_server_sign2 = '%s';\n" % sign2
varblock += "document.s_s_c_debugmode=1;\n"
fragment = tag(tag.script(varblock, type='text/javascript'))
fragment.append(
tag.script(type='text/javascript',
src='http://backs.keycaptcha.com/swfs/cap.js')
)
fragment.append(
tag.input(type='hidden', id='keycaptcha_response_field',
name='keycaptcha_response_field')
)
fragment.append(
tag.input(type='submit', id='keycaptcha_response_button',
name='keycaptcha_response_button')
)
req.session['captcha_key_session'] = session_id
return None, fragment
def verify_key(self, private_key, user_id):
if private_key is None or user_id is None:
return False
# FIXME - Not yet implemented
return True
def verify_captcha(self, req):
session = None
if 'captcha_key_session' in req.session:
session = req.session['captcha_key_session']
del req.session['captcha_key_session']
response_field = req.args.get('keycaptcha_response_field')
val = response_field.split('|')
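        # Inferred from how the fields are used below (not documented here):
        # val[0] is the expected MD5 sign, val[1] a server-provided value,
        # val[2] the KeyCaptcha verification URL fetched afterwards, and
        # val[3] the session id stored in generate_captcha().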
s = hashlib.md5('accept' + val[1] + self.private_key +
val[2]).hexdigest()
self.log.debug("KeyCaptcha response: %s .. %s .. %s",
response_field, s, session)
if s == val[0] and session == val[3]:
try:
request = urllib2.Request(
url=val[2],
headers={"User-agent": user_agent}
)
response = urllib2.urlopen(request)
return_values = response.read()
response.close()
except Exception, e:
self.log.warning("Exception in KeyCaptcha handling (%s)", e)
else:
self.log.debug("KeyCaptcha check result: %s", return_values)
if return_values == '1':
return True
self.log.warning("KeyCaptcha returned invalid check result: "
"%s (%s)", return_values, response_field)
else:
self.log.warning("KeyCaptcha returned invalid data: "
"%s (%s,%s)", response_field, s, session)
return False
def is_usable(self, req):
return self.private_key and self.user_id
| bsd-2-clause | -6,845,159,761,740,019,000 | 37.580357 | 82 | 0.588753 | false |
thiagopena/PySIGNFe | pysignfe/nfse/bhiss/v10/SubstituicaoNfse.py | 1 | 1459 | # -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
class InfSubstituicaoNfse(XMLNFe):
def __init__(self):
super(InfSubstituicaoNfse, self).__init__()
self.Id = TagCaracter(nome=u'InfSubstituicaoNfse', propriedade=u'Id', raiz=u'/')
self.NfseSubstituidora = TagInteiro(nome=u'NfseSubstituidora', tamanho=[1,15], raiz=u'/')
def get_xml(self):
self.Id.valor = u'substituicao:'+str(self.NfseSubstituidora.valor)
xml = XMLNFe.get_xml(self)
xml += self.Id.xml
xml += self.NfseSubstituidora.xml
xml += u'</InfSubstituicaoNfse>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.NfseSubstituidora.xml = arquivo
xml = property(get_xml, set_xml)
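# Illustrative usage (not part of the original module; the number is made up):
#   inf = InfSubstituicaoNfse()
#   inf.NfseSubstituidora.valor = 123
#   xml_str = inf.xml  # the Id attribute becomes 'substituicao:123'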
class SubstituicaoNfse(XMLNFe):
def __init__(self):
super(SubstituicaoNfse, self).__init__()
self.InfSubstituicaoNfse = InfSubstituicaoNfse()
self.Signature = Signature()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<SubstituicaoNfse>'
xml += self.InfSubstituicaoNfse.xml
xml += self.Signature.xml
xml += u'</SubstituicaoNfse>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.InfSubstituicaoNfse.xml = arquivo
self.Signature.xml = self._le_noh('//Rps/sig:Signature')
xml = property(get_xml, set_xml) | lgpl-2.1 | 7,768,406,906,340,372,000 | 31.444444 | 97 | 0.601782 | false |
panyam/libgraph | libgraph/graphs.py | 1 | 2606 |
class Edge(object):
def __init__(self, source, target, data = None):
self._source, self._target, self.data = source, target, data
def __repr__(self):
return "Edge<%s <-> %s>" % (repr(self.source), repr(self.target))
@property
def source(self): return self._source
@property
def target(self): return self._target
class Graph(object):
def __init__(self, multi = False, directed = False, key_func = None, neighbors_func = None):
self.nodes = {}
self._is_directed = directed
self._is_multi = multi
self.neighbors_func = neighbors_func
self.key_func = key_func or (lambda x: x)
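    # Illustrative note (added, not in the original source): key_func maps a node
    # object to the hashable key used in self.nodes, and neighbors_func lets
    # callers plug in external adjacency storage. For example, with a hypothetical
    # node type carrying an 'id' attribute:
    #   graph = Graph(directed=True, key_func=lambda node: node.id)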
@property
def is_directed(self): return self._is_directed
@property
def is_multi(self): return self._is_multi
def get_edge(self, source, target):
return self.nodes.get(self.key_func(source), {}).get(self.key_func(target), None)
def add_nodes(self, *nodes):
return [self.add_node(node) for node in nodes]
def add_node(self, node):
"""
        Add or update a node (any hashable) in the graph.
"""
if node not in self.nodes: self.nodes[self.key_func(node)] = {}
return self.nodes[self.key_func(node)]
def neighbors(self, node):
"""Return the neighbors of a node."""
if self.neighbors_func:
return self.neighbors_func(node)
else:
return self.nodes.get(self.key_func(node), {})
def iter_neighbors(self, node, reverse = False):
"""
Return an iterator of neighbors (along with any edge data) for a particular node.
Override this method for custom node storage and inspection strategies.
"""
neighbors = self.neighbors(node)
if type(neighbors) is dict:
if reverse: return reversed(self.neighbors(node).items())
else: return self.neighbors(node).iteritems()
else:
if reverse: return reversed(neighbors)
else: return neighbors
def add_raw_edge(self, edge):
self.add_nodes(edge.source,edge.target)
source,target = edge.source,edge.target
source_key = self.key_func(source)
target_key = self.key_func(target)
self.nodes[source_key][target_key] = edge
if not self.is_directed and source_key != target_key:
self.nodes[target_key][source_key] = edge
return edge
def add_edge(self, source, target):
return self.add_raw_edge(Edge(source, target))
def add_edges(self, *edges):
return [self.add_edge(*e) for e in edges]
| apache-2.0 | 1,825,467,902,488,189,400 | 33.746667 | 96 | 0.608212 | false |
googleapis/googleapis-gen | google/cloud/billing/v1/billing-v1-py/google/cloud/billing_v1/types/__init__.py | 1 | 1795 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .cloud_billing import (
BillingAccount,
CreateBillingAccountRequest,
GetBillingAccountRequest,
GetProjectBillingInfoRequest,
ListBillingAccountsRequest,
ListBillingAccountsResponse,
ListProjectBillingInfoRequest,
ListProjectBillingInfoResponse,
ProjectBillingInfo,
UpdateBillingAccountRequest,
UpdateProjectBillingInfoRequest,
)
from .cloud_catalog import (
AggregationInfo,
Category,
ListServicesRequest,
ListServicesResponse,
ListSkusRequest,
ListSkusResponse,
PricingExpression,
PricingInfo,
Service,
Sku,
)
__all__ = (
'BillingAccount',
'CreateBillingAccountRequest',
'GetBillingAccountRequest',
'GetProjectBillingInfoRequest',
'ListBillingAccountsRequest',
'ListBillingAccountsResponse',
'ListProjectBillingInfoRequest',
'ListProjectBillingInfoResponse',
'ProjectBillingInfo',
'UpdateBillingAccountRequest',
'UpdateProjectBillingInfoRequest',
'AggregationInfo',
'Category',
'ListServicesRequest',
'ListServicesResponse',
'ListSkusRequest',
'ListSkusResponse',
'PricingExpression',
'PricingInfo',
'Service',
'Sku',
)
| apache-2.0 | -5,797,525,092,338,457,000 | 27.046875 | 74 | 0.734819 | false |
akarol/cfme_tests | cfme/tests/cloud_infra_common/test_html5_vm_console.py | 1 | 8156 | # -*- coding: utf-8 -*-
"""Test for HTML5 Remote Consoles of VMware/RHEV/RHOSP Providers."""
import pytest
import imghdr
import time
import re
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.provider import CloudInfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.common.vm import VM
from cfme.utils import ssh
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.conf import credentials
from cfme.utils.providers import ProviderFilter
from wait_for import wait_for
from markers.env_markers.provider import providers
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter(classes=[CloudInfraProvider],
required_flags=['html5_console'])],
scope='module'),
]
@pytest.fixture(scope="function")
def vm_obj(request, provider, setup_provider, console_template, vm_name):
"""
Create a VM on the provider with the given template, and return the vm_obj.
Also, it will remove VM from provider using nested function _delete_vm
after the test is completed.
"""
vm_obj = VM.factory(vm_name, provider, template_name=console_template.name)
@request.addfinalizer
def _delete_vm():
try:
vm_obj.delete_from_provider()
except Exception:
logger.warning("Failed to delete vm `{}`.".format(vm_obj.name))
vm_obj.create_on_provider(timeout=2400, find_in_cfme=True, allow_skip="default")
if provider.one_of(OpenStackProvider):
# Assign FloatingIP to Openstack Instance from pool
# so that we can SSH to it
public_net = provider.data['public_network']
provider.mgmt.assign_floating_ip(vm_obj.name, public_net)
return vm_obj
@pytest.mark.rhv1
def test_html5_vm_console(appliance, provider, configure_websocket, vm_obj,
configure_console_vnc, take_screenshot):
"""
Test the HTML5 console support for a particular provider.
The supported providers are:
VMware
Openstack
RHV
For a given provider, and a given VM, the console will be opened, and then:
- The console's status will be checked.
- A command that creates a file will be sent through the console.
- Using ssh we will check that the command worked (i.e. that the file
was created.
"""
console_vm_username = credentials[provider.data.templates.get('console_template')
['creds']].get('username')
console_vm_password = credentials[provider.data.templates.get('console_template')
['creds']].get('password')
vm_obj.open_console(console='VM Console')
assert vm_obj.vm_console, 'VMConsole object should be created'
vm_console = vm_obj.vm_console
try:
# If the banner/connection-status element exists we can get
# the connection status text and if the console is healthy, it should connect.
assert vm_console.wait_for_connect(180), "VM Console did not reach 'connected' state"
# Get the login screen image, and make sure it is a jpeg file:
screen = vm_console.get_screen()
assert imghdr.what('', screen) == 'jpeg'
assert vm_console.wait_for_text(text_to_find="login:", timeout=200), ("VM Console"
" didn't prompt for Login")
# Enter Username:
vm_console.send_keys(console_vm_username)
assert vm_console.wait_for_text(text_to_find="Password", timeout=200), ("VM Console"
" didn't prompt for Password")
# Enter Password:
vm_console.send_keys("{}\n".format(console_vm_password))
time.sleep(5) # wait for login to complete
# This regex can find if there is a word 'login','password','incorrect' present in
# text, irrespective of its case
regex_for_login_password = re.compile(r'\blogin\b | \bpassword\b| \bincorrect\b',
flags=re.I | re.X)
def _validate_login():
"""
            Try to read what is present on the last line in the console.
            If it is the word 'login', enter the username; if 'password', enter the password,
            in order to make the login successful.
"""
if vm_console.find_text_on_screen(text_to_find='login', current_line=True):
vm_console.send_keys(console_vm_username)
if vm_console.find_text_on_screen(text_to_find='Password', current_line=True):
vm_console.send_keys("{}\n".format(console_vm_password))
# if the login attempt failed for some reason (happens with RHOS-cirros),
# last line of the console will contain one of the following words:
# [login, password, incorrect]
# if so, regex_for_login_password will find it and result will not be []
# .split('\n')[-1] splits the console text on '\n' & picks last item of resulting list
result = regex_for_login_password.findall(vm_console.get_screen_text().split('\n')[-1])
return result == []
# if _validate_login() returns True, it means we did not find any of words
# [login, password, incorrect] on last line of console text, which implies login success
wait_for(func=_validate_login, timeout=300, delay=5)
logger.info("Wait to get the '$' prompt")
if provider.one_of(VMwareProvider):
vm_console.wait_for_text(text_to_find=provider.data.templates.get('console_template')
['prompt_text'], timeout=200)
else:
time.sleep(15)
# create file on system
vm_console.send_keys("touch blather")
if not (BZ.bugzilla.get_bug(1491387).is_opened):
# Test pressing ctrl-alt-delete...we should be able to get a new login prompt:
vm_console.send_ctrl_alt_delete()
assert vm_console.wait_for_text(text_to_find="login:", timeout=200,
to_disappear=True), ("Text 'login:' never disappeared, indicating failure"
" of CTRL+ALT+DEL button functionality, please check if OS reboots on "
"CTRL+ALT+DEL key combination and CTRL+ALT+DEL button on HTML5 Console is working.")
assert vm_console.wait_for_text(text_to_find="login:", timeout=200), ("VM Console"
" didn't prompt for Login")
if not provider.one_of(OpenStackProvider):
assert vm_console.send_fullscreen(), ("VM Console Toggle Full Screen button does"
" not work")
with ssh.SSHClient(hostname=vm_obj.ip_address, username=console_vm_username,
password=console_vm_password) as ssh_client:
# if file was created in previous steps it will be removed here
# we will get instance of SSHResult
# Sometimes Openstack drops characters from word 'blather' hence try to remove
# file using partial file name. Known issue, being worked on.
command_result = ssh_client.run_command("rm blather", ensure_user=True)
assert command_result
except Exception as e:
# Take a screenshot if an exception occurs
vm_console.switch_to_console()
take_screenshot("ConsoleScreenshot")
vm_console.switch_to_appliance()
raise e
finally:
vm_console.close_console_window()
    # Logout is required because when running the test back to back against RHV and VMware
    # providers, the following issue would arise:
    # if the test for RHV has just finished, the code would proceed to adding the VMware
    # provider and, once it is added, navigate to the Infrastructure -> Virtual Machines page,
    # where it would see a "Page Does not Exist" error, because the browser tries to open the
    # VM details page of the RHV VM that was already deleted
    # at the end of the RHV provider console test, and the test would fail.
    # Logging out gets rid of this issue.
appliance.server.logout()
| gpl-2.0 | -2,640,400,068,133,238,300 | 43.813187 | 100 | 0.643943 | false |
adsabs/citation_helper_service | citation_helper_service/citation_helper.py | 1 | 1894 | '''
Created on Nov 1, 2014
@author: ehenneken
'''
from __future__ import absolute_import
# general module imports
import sys
import os
import operator
from itertools import groupby
from flask import current_app
from .utils import get_data
from .utils import get_meta_data
__all__ = ['get_suggestions']
def get_suggestions(**args):
# initializations
papers = []
bibcodes = []
if 'bibcodes' in args:
bibcodes = args['bibcodes']
if len(bibcodes) == 0:
return []
# Any overrides for default values?
Nsuggestions = current_app.config.get('CITATION_HELPER_NUMBER_SUGGESTIONS')
# get rid of potential trailing spaces
bibcodes = [a.strip() for a in bibcodes][
:current_app.config.get('CITATION_HELPER_MAX_INPUT')]
# start processing
# get the citations for all publications (keeping multiplicity is
# essential)
papers = get_data(bibcodes=bibcodes)
if "Error" in papers:
return papers
# removes papers from the original list to get candidates
papers = [a for a in papers if a not in bibcodes]
# establish frequencies of papers in results
paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
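    # Illustrative example (added): sorted(['B', 'A', 'A']) -> ['A', 'A', 'B'],
    # so groupby yields [('A', 2), ('B', 1)]; sorting first matters because
    # groupby only groups adjacent equal items.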
# and sort them, most frequent first
paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
# remove all papers with frequencies smaller than threshold
paperFreq = [a for a in paperFreq if a[1] > current_app.config.get(
'CITATION_HELPER_THRESHOLD_FREQUENCY')]
# get metadata for suggestions
meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
if "Error"in meta_dict:
return meta_dict
# return results in required format
return [{'bibcode': x, 'score': y, 'title': meta_dict[x]['title'],
'author':meta_dict[x]['author']} for (x, y) in
paperFreq[:Nsuggestions] if x in meta_dict.keys()]
| mit | -4,987,392,031,141,406,000 | 33.436364 | 79 | 0.67265 | false |
fenimore/freeebot | freeebot.py | 1 | 5664 | #!/usr/bin/env python
"""Twitter Bot for posting craigslist postings of Free Stuff
Currently set up for New York.
Example usage:
python tweetstuffs.py
Attributes:
- NO_IMAGE -- link for when there is no image found
- FILE -- path to tmp file
- PATH -- current directory
- C_KEY, C_SECRET, A_TOKEN, A_TOKEN_SECRET -- twitter api tokens
@author: Fenimore Love
@license: MIT
@date: 2015-2016
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re, sys, os, time, urllib.error, urllib.request
from datetime import datetime
from time import gmtime, strftime, sleep
import tweepy
#from freestuffs import stuff_scraper
from freestuffs.stuff_scraper import StuffScraper
from secrets import *
# ====== Individual bot configuration ==========================
bot_username = 'freeebot'
logfile = bot_username
# ==============================================================
PATH = os.getcwd()
if not os.path.exists(PATH + '/tmp/'):
os.makedirs(PATH + '/tmp/')
if not os.path.exists(PATH + '/log/'):
os.makedirs(PATH + '/log/')
NO_IMAGE = 'http://upload.wikimedia.org/wikipedia/commons/a/ac/No_image_available.svg'
FILE = PATH + '/tmp/tmp-filename.jpg'
def create_tweet(stuff):
"""Create string for tweet with stuff.
TODO: replace New York with NY
TODO: add a hashtag
"""
post = {"title": stuff['title'],
"loc" : stuff['location'],
"url" : stuff['url']}
_text = post["loc"].strip(', New York') + "\n" + post["title"] +" " + post["url"] + ' #FreeStuffNY'
_text = check_length(_text, post)
return _text
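# Note (added): post["loc"].strip(', New York') strips any of the characters
# ", NewYork" from both ends of the location rather than removing the literal
# suffix ', New York', so e.g. a trailing 'k' in a neighbourhood name is also
# dropped. The resulting tweet is roughly:
#   "<location>\n<title> <url> #FreeStuffNY"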
def tweet(new_stuffs_set):
"""Tweet new free stuff."""
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
# Unpack set of sorted tuples back into dicts
stuffs = map(dict, new_stuffs_set)
    if len(list(new_stuffs_set)) != 0: # if there are new items
for stuff in stuffs:
tweet = create_tweet(stuff)
if str(stuff['image']) == NO_IMAGE:
isImage = False
else:
isImage = True
try:
urllib.request.urlretrieve(stuff['image'], FILE)
except:
            log('image: ' + stuff['image'] + ' can\'t be found')
isImage = False
try:
if isImage:
log("\n\n Posting with Media \n " + tweet + "\n ----\n")
api.update_with_media(FILE, status=tweet)
else:
log("\n\n Posting without media\n "
+ tweet + "\n ----\n")
api.update_status(tweet)
except tweepy.TweepError as e:
log('Failure ' + stuff['title'])
log(e.reason)
else:
print("\n ----\n")
def check_length(tweet, post):
"""Check if tweet is proper length."""
size = len(tweet) - len(post["url"])
if size < 145: # tweet is good
return tweet
else:
log("Tweet too long")
tweet = post["loc"] + "\n" + post["title"] + " " + post["url"]
        size = len(tweet) - len(post["url"])
if size > 144: # tweet is still not good
tweet = post["title"] + " " + post["url"]
return tweet
return tweet
def log(message):
"""Log message to logfile. And print it out."""
# TODO: fix
date = strftime("-%d-%b-%Y", gmtime())
path = os.path.realpath(os.path.join(os.getcwd(), 'log'))
with open(os.path.join(path, logfile + date + '.log'),'a+') as f:
t = strftime("%d %b %Y %H:%M:%S", gmtime())
print("\n" + t + " " + message) # print it tooo...
f.write("\n" + t + " " + message)
if __name__ == "__main__":
"""Tweet newly posted Free stuff objects.
    Uses sets of sorted (key, value) tuples built from the stuff dicts to
    compare the previous scrape with the new one. There is no need to call
    for precise coordinates, as Twitter doesn't need them. If the set has 15
    items, nothing is posted, in order to avoid flooding Twitter
    on start up.
"""
#process_log = open(os.path.join('log', logfile_username),'a+')
_location = 'newyork' # TODO: Change to brooklyn?
    stale_set = set() # the B set: what has already been seen in previous scrapes
log("\n\nInitiating\n\n")
while True:
stuffs = [] # a list of dicts
for stuff in StuffScraper(_location, 15).stuffs: # convert stuff
stuff_dict = {'title':stuff.thing, # object into dict
'location':stuff.location,
'url':stuff.url, 'image':stuff.image}
stuffs.append(stuff_dict)
fresh_set = set() # A set, Fresh out the oven
for stuff in stuffs:
tup = tuple(sorted(stuff.items()))
fresh_set.add(tup)
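        # Added note: dicts are not hashable, so each stuff dict is flattened
        # into a sorted tuple of (key, value) pairs; the set difference below
        # then keeps only postings that were not seen on the previous pass.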
"""Evaluate if there have been new posts"""
ready_set = fresh_set - stale_set # Get the difference
stale_set = fresh_set
        if len(list(ready_set)) != 15:
tweet(ready_set)
log("\n New Stuffs : " + str(len(list(ready_set)))+
"\n Todays Stuffs : "+ str(len(list(stale_set)))+
"\n\n Sleep Now (-_-)Zzz... \n")
sleep(1000) # 3600 Seconds = Hour
| mit | 6,432,309,924,595,103,000 | 35.307692 | 103 | 0.570798 | false |
embray/numpy | numpy/lib/npyio.py | 1 | 66490 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from ._compiled_base import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. Compressed files with the filename extension
``.gz`` are acceptable. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else:
# Try a pickle
try:
return pickle.load(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
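# Illustrative note on _getconv (worked examples, not taken from the source above):
#   _getconv(np.dtype(np.int32))('7')      -> 7      (integers go through float, so '7.0' also parses)
#   _getconv(np.dtype(np.bool_))('1')      -> True
#   _getconv(np.dtype(np.float64))('2.5')  -> 2.5
# loadtxt below uses these converters as the per-column defaults.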
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
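    # Worked example (illustrative): for a structured dtype such as
    # np.dtype([('x', float), ('y', float)]) with no nested shapes, flatten_dtype
    # returns ([dtype('float64'), dtype('float64')], [(1, None), (1, None)]),
    # i.e. the flat per-column dtypes plus the re-packing info that pack_items
    # below uses to rebuild each row as a tuple.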
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
    newline : str, optional
        Character separating lines.
        .. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
    specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was deprecated in numpy 1.5, and will be removed in
        numpy 2.0. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was deprecated in numpy 1.5, and will be removed in
numpy 2.0. Please use `missing_values` instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(
"The use of `skiprows` is deprecated, it will be removed in "
"numpy 2.0.\nPlease use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(
"The use of `missing` is deprecated, it will be removed in "
"Numpy 2.0.\nPlease use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
#
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
else:
rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-3-clause | -2,169,429,861,298,942,200 | 33.957939 | 89 | 0.559107 | false |
ramanala/PACE | pacersmexplorer.py | 1 | 23886 | #!/usr/bin/env python
#Copyright (c) 2016 Ramnatthan Alagappan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import subprocess
import cProfile
import Queue
import threading
import time
import pprint
import code
import sys
import collections
import gc
from _paceutils import *
from pace import DSReplayer
from pacedefaultfs import defaultfs, defaultnet
import itertools
import pickle
from collections import defaultdict
import math
class MultiThreadedChecker(threading.Thread):
queue = Queue.Queue()
outputs = {}
def __init__(self, queue, thread_id='0'):
threading.Thread.__init__(self)
self.queue = MultiThreadedChecker.queue
self.thread_id = str(thread_id)
def __threaded_check(self, base_path, dirnames, client_stdout, crashid):
assert type(paceconfig(0).checker_tool) in [list, str, tuple]
dirname_param = ''
for dirname in dirnames.values():
dirname_param += str(dirname) + str('@')
args = [paceconfig(0).checker_tool, dirname_param, base_path, client_stdout, self.thread_id]
retcode = subprocess.call(args)
MultiThreadedChecker.outputs[crashid] = retcode
def run(self):
while True:
task = self.queue.get()
self.__threaded_check(*task)
self.queue.task_done()
@staticmethod
def check_later(base_path, dirnames, client_stdout, retcodeid):
MultiThreadedChecker.queue.put((base_path, dirnames, client_stdout, retcodeid))
@staticmethod
def reset():
assert MultiThreadedChecker.queue.empty()
MultiThreadedChecker.outputs = {}
@staticmethod
def wait_and_get_outputs():
MultiThreadedChecker.queue.join()
return MultiThreadedChecker.outputs
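# Illustrative usage sketch (the worker count and variable names here are hypothetical,
# not taken from this file):
#   for i in range(4):
#       worker = MultiThreadedChecker(MultiThreadedChecker.queue, thread_id=i)
#       worker.setDaemon(True)
#       worker.start()
#   MultiThreadedChecker.check_later(base_path, dirnames, stdout_file, crash_id)
#   outputs = MultiThreadedChecker.wait_and_get_outputs()   # maps crash id -> checker return code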
def get_crash_point_id_string(crash_point):
toret = ""
for i in range(0, len(crash_point)):
c = crash_point[i]
if c == -1:
c = 'z' # the node has not done any persistent state update
if i < len(crash_point)-1:
toret += str(c) + "-"
else:
toret += str(c)
return toret
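# Example (illustrative): a crash point of (3, -1, 5) yields the id string "3-z-5";
# -1 (the machine has not persisted anything yet) is rendered as 'z'.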
def dict_value_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.itervalues()))
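# Example (illustrative): dict_value_product({'a': [1, 2], 'b': [7]}) yields the two
# dictionaries {'a': 1, 'b': 7} and {'a': 2, 'b': 7}, i.e. the cross product of the
# per-key choices.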
def atleast_one_present(machines, currs, ends):
for m in machines:
if currs[m] < len(ends[m]):
return True
return False
def replay_dir_base_name_RO(crash_point, omit_pt):
assert type(omit_pt) == dict
base_name = get_crash_point_id_string(crash_point)
base_name += "_RO"
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key])
return toret
base_name += "_OM" + dict_string(omit_pt)
return base_name
def replay_dir_base_name_ARO(crash_point, omit_pt):
assert type(omit_pt) == dict
base_name = get_crash_point_id_string(crash_point)
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key][1])
return toret
base_name += "_ARO" + dict_string(omit_pt)
return base_name
def replay_dir_base_name_AP(crash_point, end_pt):
assert type(end_pt) == dict
base_name = get_crash_point_id_string(crash_point)
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key])
return toret
base_name += "_AP" + dict_string(end_pt)
return base_name
def append_or_trunc_ops(replayer, machines, crash_point):
toret = {}
for machine in machines:
curr_op = replayer.micro_ops[machine][crash_point[machine]].op
toret[machine] = curr_op == 'append' or curr_op == 'trunc'
return toret
def nCr(n,r):
func = math.factorial
return func(n) / func(r) / func(n-r)
def get_replay_dirs(machines, base_name):
dirnames = {}
base_path = os.path.join(paceconfig(0).scratchpad_dir, base_name)
for machine in machines:
os.system('rm -rf ' + base_path)
os.system('mkdir -p ' + base_path)
dirnames[machine] = os.path.join(base_path , 'rdir-' + str(machine))
stdout_files = {}
for machine_id in dirnames.keys():
stdout_files[machine_id] = os.path.join(base_path, str(machine_id) + '.input_stdout')
return (base_path, dirnames,stdout_files)
def unique_grp(grps, machines, filter_machines):
assert len(machines) > 0 and len(filter_machines) < len(machines)
to_ret = []
to_ret_set = set()
temp = {}
max_for_state = defaultdict(lambda:-1, temp)
for state in grps:
state_arr = list(state)
for machine in machines:
if machine not in filter_machines:
val = state_arr[machine]
del state_arr[machine]
if tuple(state_arr) not in max_for_state.keys():
max_for_state[tuple(state_arr)] = val
else:
if max_for_state[tuple(state_arr)] < val:
max_for_state[tuple(state_arr)] = val
state_arr.insert(machine, max_for_state[tuple(state_arr)])
to_ret_set.add(tuple(state_arr))
return to_ret_set
def check_logically_same(to_omit_list):
ops_eq = all(x.op == to_omit_list[0].op for x in to_omit_list)
if ops_eq:
name_checking_ops = ['write', 'append', 'creat', 'trunc', 'unlink']
if to_omit_list[0].op in name_checking_ops:
name_eq = all(os.path.basename(x.name) == os.path.basename(to_omit_list[0].name) for x in to_omit_list)
return ops_eq and name_eq
elif to_omit_list[0].op == 'rename':
dest_eq = all(os.path.basename(x.dest) == os.path.basename(to_omit_list[0].dest) for x in to_omit_list)
src_eq = all(os.path.basename(x.source) == os.path.basename(to_omit_list[0].source) for x in to_omit_list)
return ops_eq and dest_eq and src_eq
else:
for omit in to_omit_list:
if 'fsync' in str(omit):
return False
assert False
else:
return False
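# Example (illustrative, hypothetical paths): omitting creat("/m0/data/log") on one
# machine together with creat("/m1/data/log") on another is "logically same" (same op,
# same basename), whereas omitting a creat on one machine and a write on another is not.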
def compute_reachable_global_prefixes(replayer):
print 'Computing globally reachable prefix states'
assert paceconfig(0).cached_prefix_states_file is not None and len(paceconfig(0).cached_prefix_states_file) > 0
prefix_cached_file = paceconfig(0).cached_prefix_states_file
interesting_prefix_states = []
final_reachable_prefix_fsync_deps = set()
if not os.path.isfile(prefix_cached_file):
print 'No cached file. Computing reachable prefixes from scratch.'
base_lists = replayer.ops_indexes().values()
list0 = base_lists[0]
list1 = base_lists[1]
interesting_prefix_states = []
        # Algorithm to find all consistent cuts of persistent states:
        # Naive method: say there are 3 machines and the numbers of events in their
        # traces are <n1, n2, n3>. Then there are n1 X n2 X n3 ways in which the
        # traces could combine. Should we check every one of them?
        # No, we can do better; the intuition is that if <i, j> is not consistent,
        # then any superset <i, j, k> of it, for any k, is also inconsistent.
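        # Illustration (hypothetical sizes): with three traces of lengths <2, 2, 2>
        # there are 8 combinations in total, but we first keep only the legal pairs
        # over machines 0 and 1 (say 3 of the 4) and then extend just those pairs
        # with indexes from machine 2, so supersets of an inconsistent pair are
        # never generated.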
for index1 in list0:
for index2 in list1:
if replayer.is_legal_gp((index1, index2)):
interesting_prefix_states.append((index1, index2))
for i in range(2, len(base_lists)):
interesting_prefix_cache = []
for index in base_lists[i]:
for inter in interesting_prefix_states:
to_check = inter + (index, )
if replayer.is_legal_gp(to_check):
interesting_prefix_cache.append(to_check)
interesting_prefix_states = interesting_prefix_cache
for state in interesting_prefix_states:
index = 0
candidate = []
for point in state:
candidate.append(replayer.persistent_op_index(index, point))
index += 1
candidate = tuple(candidate)
final_reachable_prefix_fsync_deps.add(candidate)
with open(prefix_cached_file, "w") as f:
pickle.dump(final_reachable_prefix_fsync_deps, f, protocol = 0)
else:
print 'Using cached globally reachable states'
with open(prefix_cached_file, "r") as f:
final_reachable_prefix_fsync_deps = pickle.load(f)
final_reachable_prefix_no_deps = set(list(final_reachable_prefix_fsync_deps)[:])
assert not bool(final_reachable_prefix_no_deps.symmetric_difference(final_reachable_prefix_fsync_deps))
    # We are mostly done here, but there is one more optimization we can do:
    # if a trace ends with fsync or fdatasync, it can be skipped for replay,
    # because an fsync adds no new on-disk state to reconstruct. However, fsyncs
    # are important for calculating FS reordering dependencies. So we maintain
    # two sets: one with fsync deps (used when we apply FS reordering) and
    # one without fsync deps, used to replay globally reachable prefixes.
interesting_states_check = set(list(final_reachable_prefix_fsync_deps)[:])
for state in interesting_states_check:
machine = 0
for end_point in state:
if replayer.micro_ops[machine][end_point].op == 'fsync' or replayer.micro_ops[machine][end_point].op == 'fdatasync' or\
replayer.micro_ops[machine][end_point].op == 'file_sync_range':
prev_point = replayer.get_prev_op(state)
# if subsumed by another GRP, just remove this. If not subsumed, leave it
if prev_point in interesting_states_check:
final_reachable_prefix_no_deps.remove(state)
break
machine += 1
assert final_reachable_prefix_fsync_deps is not None and len(final_reachable_prefix_fsync_deps) > 0
assert final_reachable_prefix_no_deps is not None and len(final_reachable_prefix_no_deps) > 0
assert final_reachable_prefix_no_deps <= final_reachable_prefix_fsync_deps
return (final_reachable_prefix_fsync_deps, final_reachable_prefix_no_deps)
def replay_correlated_global_prefix(replayer, interesting_prefix_states, replay = True):
print 'Checking prefix crash states...'
machines = replayer.conceptual_machines()
replay_start = time.time()
count = 0
for crash_point in interesting_prefix_states:
assert len(crash_point) == len(machines)
base_name = get_crash_point_id_string(crash_point)
base_name += "_GRP"
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], get_crash_point_id_string(crash_point))
count += 1
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Prefix states : ' + str(count)
print 'Prefix replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_atomicity_prefix(replayer, interesting_prefix_states, client_index, replay = True):
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
majority_count = int(len(server_machines) / 2) + 1
assert server_count == 3 and majority_count == 2
count = 0
how_many_majorities = 1
pick_server_count = majority_count
replay_start = time.time()
replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps = False)
apm_imposed_subset_machineset = list(itertools.combinations(server_machines, pick_server_count))
assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
apm_imposed_subset_machineset = apm_imposed_subset_machineset[0:how_many_majorities]
assert len(apm_imposed_subset_machineset) == 1
apm_imposed_machines = apm_imposed_subset_machineset[0]
for machine in machines:
replayer.load(machine, 0)
for crash_point in interesting_prefix_states:
atomic_ends = {}
atomic_currs = {}
machine = 0
for end_point in crash_point:
if machine in apm_imposed_machines:
atomic_ends[machine] = range(0, replayer.iops_len(machine, end_point))
atomic_currs[machine] = 0
machine += 1
atomic_end_list = []
while atleast_one_present(apm_imposed_machines, atomic_currs, atomic_ends):
atomic_end = {}
for machine in apm_imposed_machines:
if atomic_currs[machine] < len(atomic_ends[machine]):
atomic_end[machine] = atomic_ends[machine][atomic_currs[machine]]
else:
atomic_end[machine] = atomic_ends[machine][len(atomic_ends[machine])-1]
atomic_currs[machine] += 1
atomic_end_list.append(atomic_end)
for atomic_end in atomic_end_list:
for machine in server_machines:
if machine in apm_imposed_machines:
replayer.iops_end_at(machine, (crash_point[machine], atomic_end[machine]))
else:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
replayer.iops_end_at(client_index, (crash_point[client_index], replayer.iops_len(client_index, crash_point[client_index]) - 1))
base_name = replay_dir_base_name_AP(crash_point, atomic_end)
count += 1
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Atomicity Prefix correlated states : ' + str(count)
print 'Atomicity Prefix correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_reordering(replayer, interesting_prefix_states, client_index, replay = True):
def end_highest_so_far(machine, curr_endpoint):
machine_dict = can_omit_for_machine_endpoint[machine]
maximum = -1
for key in machine_dict.keys():
if key > maximum and key <= curr_endpoint:
maximum = key
return maximum
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
can_omit_ops = {}
can_omit_for_machine_endpoint = {}
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
majority_count = int(len(server_machines) / 2) + 1
# For now assert for 3 and 2 :)
assert server_count == 3 and majority_count == 2
for machine in machines:
can_omit_ops[machine] = defaultdict(list)
for machine in machines:
can_omit_for_machine_endpoint[machine] = defaultdict(list)
replay_start = time.time()
for machine in machines:
replayer.load(machine, 0)
# Phase 1: See what all ops can be dropped for each end point in a machine
# For example, let's say the GRP is (x, y, z). For x in machine0, there can
# be multiple ops that are before x and can still be dropped when we end at x
    # For example, consider the following:
# x-2: creat(file)
# x-1: write(foo)
# x : write(bar)
# In the above trace, it is legal to drop creat when the machine crashes at x.
# In this phase, we will find all such points that can be dropped for each x.
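    # Continuing the example above (illustrative): when the machine crashes at x,
    # dropping the creat at x-2 is a legal reordering, so can_omit_ops[machine][x]
    # will include x-2; later end points reuse this list via end_highest_so_far
    # instead of re-checking every earlier op.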
for crash_point in interesting_prefix_states:
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
machine_id = 0
for end_point in crash_point:
can_end_highest = end_highest_so_far(machine_id, end_point)
if can_end_highest == -1:
omit_ops = [fs_op for fs_op in fs_ops[machine_id] if fs_op > -1 and fs_op < end_point]
else:
omit_ops1 = can_omit_for_machine_endpoint[machine_id][can_end_highest]
omit_ops2 = [fs_op for fs_op in fs_ops[machine_id] if fs_op >= can_end_highest and fs_op > -1 and fs_op < end_point]
omit_ops = omit_ops1 + omit_ops2
can_omit_temp = []
omit_ops_temp = []
for i in omit_ops:
replayer.mops_omit(machine_id, i)
if replayer.is_legal_reordering(machine_id):
can_omit_temp.append(i)
omit_ops_temp.append(i)
replayer.mops_include(machine_id, i)
can_omit_for_machine_endpoint[machine_id][end_point] = omit_ops_temp
can_omit_ops[machine_id][end_point] = can_omit_temp
machine_id += 1
# Phase 2: Using the points collected in phase 1, we can now see what points can be dropped across machines
# For example, for (x, y, z), if the drop dictionary looks like {x:[0, 2, 4], y:[1], z : [5, 7]}
    # then we have 3*1*2 ways of dropping. Notice that we don't need to check whether this
    # is a valid reordering: it *has* to be a valid state, as the local drop points have
    # already been checked for this condition.
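    # Worked example (illustrative numbers): if omittables ends up as {0: [0, 2, 4], 1: [1]}
    # for the chosen majority of servers, dict_value_product(omittables) yields the
    # 3 * 1 = 3 omission combinations {0: 0, 1: 1}, {0: 2, 1: 1} and {0: 4, 1: 1},
    # and each combination is replayed below as a separate crash state.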
reordering_count = 0
pick_server_count = -1
how_many_majorities = 1
pick_server_count = majority_count
apm_imposed_subset_machineset = list(itertools.combinations(server_machines, pick_server_count))
assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
apm_imposed_subset_machineset = apm_imposed_subset_machineset[0:how_many_majorities]
for apm_imposed_machines in apm_imposed_subset_machineset:
for crash_point in interesting_prefix_states:
omittables = {}
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
for machine in apm_imposed_machines:
if machine != client_index:
omittables[machine] = can_omit_ops[machine][crash_point[machine]]
for omit_pt in list(dict_value_product(omittables)):
to_omit_list = []
for mac in omit_pt.keys():
curr_omit = omit_pt[mac]
to_omit_list.append(replayer.micro_ops[mac][curr_omit])
if check_logically_same(to_omit_list):
reordering_count += 1
replayer.mops_omit_group(omit_pt)
base_name = replay_dir_base_name_RO(crash_point, omit_pt)
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
replayer.mops_include_group(omit_pt)
del omittables
omittables = None
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Reordering correlated states : ' + str(reordering_count)
print 'Reordering correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_atomicity_reordering(replayer, interesting_prefix_states, client_index, replay = True):
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
can_omit_ops = {}
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
majority_count = int(len(server_machines) / 2) + 1
assert server_count == 3 and majority_count == 2
atomicity_reordering_count = 0
pick_server_count = majority_count
how_many_majorities = 1
replay_start = time.time()
replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps = False)
apm_imposed_subset_machineset = list(itertools.combinations(server_machines, pick_server_count))
assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
apm_imposed_subset_machineset = apm_imposed_subset_machineset[0:how_many_majorities]
for machine in machines:
replayer.load(machine, 0)
for apm_imposed_machines in apm_imposed_subset_machineset:
for crash_point in interesting_prefix_states:
append_trunc_indexes = append_or_trunc_ops(replayer, server_machines, crash_point)
if any(append_trunc_indexes.values()):
                # First, end all machines at the GRP point (the loop below walks
                # the machine ids directly, so no manual counter is needed)
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
# Next we have to omit the sub (io or disk) ops as we call it
atomic_omits = {}
atomic_ro_currs = {}
machine = 0
for end_point in crash_point:
atomic_ro_currs[machine] = 0
if machine in apm_imposed_machines:
if append_trunc_indexes[machine]:
                            # If it is an append or trunc, break it into its pieces and test for the absence of each piece
atomic_omits[machine] = range(0, replayer.iops_len(machine, end_point))
else:
# if not append, just put a marker. We will exclude this marker later
atomic_omits[machine] = [str(replayer.iops_len(machine, end_point)-1)]
machine +=1
atomic_omit_list = []
while atleast_one_present(apm_imposed_machines, atomic_ro_currs, atomic_omits):
atomic_omit = {}
for machine in apm_imposed_machines:
if atomic_ro_currs[machine] < len(atomic_omits[machine]):
atomic_omit[machine] = atomic_omits[machine][atomic_ro_currs[machine]]
else:
atomic_omit[machine] = None
atomic_ro_currs[machine] += 1
atomic_omit_list.append(atomic_omit)
for atomic_omit_x in atomic_omit_list:
atomic_omit = atomic_omit_x.copy()
base_name_prep = atomic_omit_x.copy()
for mac in apm_imposed_machines:
iop_index = atomic_omit[mac]
if type(iop_index) == str or iop_index == None:
del atomic_omit[mac]
else:
atomic_omit[mac] = (crash_point[mac], iop_index)
base_name_prep[mac] = (crash_point[mac], iop_index)
replayer.iops_omit_group(atomic_omit)
base_name = replay_dir_base_name_ARO(crash_point, base_name_prep)
atomicity_reordering_count += 1
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
replayer.iops_include_group(atomic_omit)
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Atomicity reordering correlated states : ' + str(atomicity_reordering_count)
print 'Atomicity reordering correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def check_corr_crash_vuls(pace_configs, sock_config, threads = 1, replay = False):
print 'Parsing traces to determine logical operations ...'
#initialize the replayer
replayer = DSReplayer(pace_configs, sock_config)
#set the environment - what file system (defaultfs)? what network(defaultnet)?
replayer.set_environment(defaultfs('count', 1), defaultnet(), load_cross_deps = True)
#did we parse and understand? if yes, print.
replayer.print_ops(show_io_ops = True)
print 'Successfully parsed logical operations!'
if replay == False:
return
assert threads > 0
for i in range(0, threads):
t = MultiThreadedChecker(MultiThreadedChecker.queue, i)
t.setDaemon(True)
t.start()
(reachable_prefix_fsync_deps, reachable_prefix_no_deps) = compute_reachable_global_prefixes(replayer)
grps_0_1_no_deps = unique_grp(reachable_prefix_no_deps, replayer.conceptual_machines(), [0,1])
grps_0_1_fsync_deps = unique_grp(reachable_prefix_fsync_deps, replayer.conceptual_machines(), [0,1])
MultiThreadedChecker.reset()
replay_correlated_global_prefix(replayer, grps_0_1_no_deps, True)
MultiThreadedChecker.reset()
replay_correlated_reordering(replayer, grps_0_1_fsync_deps, replayer.client_index, True)
MultiThreadedChecker.reset()
replay_correlated_atomicity_reordering(replayer, grps_0_1_no_deps, replayer.client_index, True)
MultiThreadedChecker.reset()
replay_correlated_atomicity_prefix(replayer, grps_0_1_no_deps, replayer.client_index, True)
uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])
os.system('cp ' + os.path.join(uppath(paceconfig(0).cached_prefix_states_file, 1), 'micro_ops') + ' ' + paceconfig(0).scratchpad_dir) | mit | -8,455,374,928,677,028,000 | 35.69278 | 134 | 0.70937 | false |
py-amigos/adengine | tests/views/test_user.py | 1 | 2439 | import json
from adengine.model import User, Ad
NOT_FOUND_ERROR = {
"error": "Not found"
}
def build_api_url(id_=None):
if id_ is not None:
return "/api/users/{}".format(id_)
return "/api/users"
def _new_ad(user, text="ad-text"):
ad = Ad(text=text, author_id=user.id)
return ad
def _new_user(name='Peter'):
user = User(email='{name}@example.com'.format(name=name),
name=name,
username=name,
password_hash='12346')
return user
def _add_user(session, user):
return _add_resource(session, user)
def _add_resource(session, resource):
session.add(resource)
session.commit()
return resource
def test_user_added(client):
"""
User should be added to the database and ID generated.
"""
user = _new_user()
result = client.post(
build_api_url(),
data=json.dumps(user.as_dict()),
content_type='application/json'
)
assert 201 == result.status_code
def test_get_all_users(session, client):
"""
Should return all added users.
"""
# given
user1 = _new_user(name='Eugene')
user2 = _new_user(name='Vova')
_add_user(session, user1)
_add_user(session, user2)
# execute
all_users = json.loads(client.get(build_api_url()).data)
# verify
assert 2 == len(all_users.get("objects"))
def test_delete_non_existing_user(session, app):
"""
Should fail in attempt to delete non-existing user.
"""
# given
user_id = -1
client = app.test_client()
# exercise
query = '/api/v1.0/users/{user_id}'.format(user_id=user_id)
result = client.delete(query)
# verify
    assert result.status_code == 404
def test_user_deleted(session, client):
"""Should delete user using View class for users."""
# given
user = _new_user(name='to-delete')
_add_user(session, user)
# exercise
result = client.delete(build_api_url(user.id))
# verify
assert result.status_code == 204
assert None == User.query.filter_by(id=user.id).first()
def test_get_user_by_id(session, app):
"""
Should return user by its identifier.
"""
# given
user = _new_user(name='Ivan')
_add_user(session, user)
client = app.test_client()
query = build_api_url(user.id)
# exercise
user_in_db = json.loads(client.get(query).data)
# verify
assert user.id == user_in_db.get('id')
| artistic-2.0 | 717,517,338,135,256,300 | 20.394737 | 63 | 0.603936 | false |
tartley/extending_unittest | src/all_dirs_runner.py | 1 | 3508 | '''
A test runner that augments Django's standard one by finding subclasses of
unittest.TestCase no matter where they are located in the project, even in
directories which are not django apps. (the default test runner only looks in
particular modules within each django app.)
See also tests.utils.testrunner, which uses this.
'''
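# This runner is typically wired up through Django's TEST_RUNNER setting, for
# example (illustrative dotted path only):
#   TEST_RUNNER = 'src.all_dirs_runner.AllDirsTestRunner'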
from inspect import getmembers, isclass
import os
from os.path import join, relpath, splitext
import sys
from unittest import TestCase, TestLoader, TestSuite
from django.test.simple import reorder_suite, DjangoTestSuiteRunner
from django.test.testcases import TestCase as DjangoTestCase
SKIP_TEST_CLASSES = set([
TestCase, DjangoTestCase,
])
def _get_module_names(root):
'''
Yield all the Python modules in the given root dir and its subdirs
'''
for subdir, dirs, fnames in os.walk(root):
        # Prune hidden directories and 'talk' in place so os.walk skips them,
        # rebuilding the list rather than removing items while iterating over it.
        dirs[:] = [d for d in dirs if not d.startswith('.') and d != 'talk']
        for fname in fnames:
            if fname.endswith('.py'):
                yield relpath(join(subdir, fname))
def _to_importable_name(fname):
'''
Convert the filename of a module into the module name used to import it.
e.g. 'ordering/tests/my_test.py' -> 'esperanto.ordering.tests.my_test'
'''
fname, _ = splitext(fname)
modname = fname.replace('/', '.')
if modname.endswith('.__init__'):
modname = modname[:-9]
return modname
def _import(modname):
'''
Given a module name in 'ordering.blobs' format, imports and returns it
'''
__import__(modname)
return sys.modules[modname]
def _get_testcases(module):
'''
Yield all the TestCase subclasses defined in the given module.
'''
for name, value in getmembers(module):
if (
isclass(value) and
issubclass(value, TestCase) and
value not in SKIP_TEST_CLASSES
):
yield value
class AllDirsTestRunner(DjangoTestSuiteRunner):
def _test_matches(self, testname, command_line):
'''
Returns True if the named test should be included in the suite
'''
return (
not command_line or
any(arg in testname for arg in command_line)
)
def build_suite(self, test_labels, extra_tests=None, **kwargs):
'''
Override the base class method to return a suite consisting of all
        TestCase subclasses throughout the whole project.
'''
if test_labels:
suite = TestSuite()
else:
suite = DjangoTestSuiteRunner.build_suite(
self, test_labels, extra_tests, **kwargs
)
added_test_classes = set(t.__class__ for t in suite)
loader = TestLoader()
for fname in _get_module_names(os.getcwd()):
module = _import(_to_importable_name(fname))
for test_class in _get_testcases(module):
if test_class in added_test_classes:
continue
for method_name in loader.getTestCaseNames(test_class):
testname = '.'.join([
module.__name__, test_class.__name__, method_name
])
if self._test_matches(testname, test_labels):
suite.addTest(loader.loadTestsFromName(testname))
added_test_classes.add(test_class)
return reorder_suite(suite, (TestCase,))
| bsd-3-clause | -4,128,150,580,483,212,300 | 28.982906 | 77 | 0.609179 | false |
StudyBlue/sblibs | sblibs/display/general.py | 1 | 1801 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: [email protected]
#
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
"""
An collection of usefull decorators for debug
and time evaluation of functions flow
"""
# stdlib
from functools import wraps
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2: # pragma: no cover
from itertools import izip
zip = izip
else: # pragma: no cover
zip = zip
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
# Copied from `six' library.
# Copyright (c) 2010-2015 Benjamin Peterson
# License: MIT
class metaclass(meta):
"""Dummy metaclass"""
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
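# Example usage (illustrative): declare a class whose metaclass is abc.ABCMeta in
# a way that works on both Python 2 and 3:
#   class MyInterface(with_metaclass(abc.ABCMeta, object)):
#       pass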
def cache(function):
"""
Function: cache
Summary: Decorator used to cache the input->output
    Examples: a memoized fib evaluates repeated calls in O(1) time
        instead of exponential time
Attributes:
@param (function): function
Returns: wrapped function
TODO: Give support to functions with kwargs
"""
memory = {}
miss = object()
@wraps(function)
def _wrapper(*args):
result = memory.get(args, miss)
if result is miss:
_wrapper.call += 1
result = function(*args)
memory[args] = result
return result
_wrapper.call = 0
return _wrapper
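# Example usage (illustrative):
#   @cache
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#   fib(100)     # fast: each distinct argument is computed only once
#   fib.call     # number of cache misses recorded by the wrapper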
| bsd-2-clause | 1,671,279,908,194,633,700 | 23 | 78 | 0.622222 | false |
Donkyhotay/MoonPy | zope/wfmc/adapter/integration.py | 1 | 1606 | ##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Processes
$Id: integration.py 30314 2005-05-09 17:07:09Z jim $
"""
from zope import component, interface
from zope.wfmc import interfaces
interface.moduleProvides(interfaces.IIntegration)
def createParticipant(activity, process_definition_identifier, performer):
participant = component.queryAdapter(
activity, interfaces.IParticipant,
process_definition_identifier + '.' + performer)
if participant is None:
participant = component.getAdapter(
activity, interfaces.IParticipant, '.' + performer)
return participant
def createWorkItem(participant,
process_definition_identifier, application):
workitem = component.queryAdapter(
participant, interfaces.IWorkItem,
process_definition_identifier + '.' + application)
if workitem is None:
workitem = component.getAdapter(
participant, interfaces.IWorkItem, '.' + application)
return workitem
| gpl-3.0 | -3,358,966,468,289,544,700 | 34.688889 | 78 | 0.652553 | false |
microelly2/geodata | geodat/import_aster.py | 1 | 5208 | ''' geodat import AST (gdal)'''
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- geodat import AST (gdal)
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
#http://geoinformaticstutorial.blogspot.de/2012/09/reading-raster-data-with-python-and-gdal.html
#http://forum.freecadweb.org/viewtopic.php?f=8&t=17647&start=10#p139201
# the ast file is expected in ~/.FreeCAD/geodat/AST
# FreeCAD.ConfigGet("UserAppData") +'/geodat/AST/ASTGTM2_' + ff +'_dem.tif'
'''
ASTER GDEM Policy Agreements
I agree to redistribute the ASTER GDEM *only* to individuals within my organization or project of intended use or in response to disasters in support of the GEO Disaster Theme.
When presenting or publishing ASTER GDEM data, I agree to include "ASTER GDEM is a product of METI and NASA."
Because there are known inaccuracies and artifacts in the data set, please use the product with awareness of its limitations. The data are provided "as is" and neither NASA nor METI/ERSDAC will be responsible for any damages resulting from use of the data.
'''
from geodat.say import *
import geodat.transversmercator
from geodat.transversmercator import TransverseMercator
import geodat.import_xyz
import geodat.geodat_lib
# apt-get install python-gdal
import gdal
from gdalconst import *
import WebGui
import Points
def import_ast(b=50.26,l=11.39):
'''get the data from a downloaded file
the file is expected in FreeCAD.ConfigGet("UserAppData") + '/geodat/AST/'
    with the common filename for lat/lon parameters
example .../.FreeCAD/geodat/AST/ASTGTM2_N51E010_dem.tif
'''
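    # Illustration: with the default b=50.26, l=11.39 the code below computes
    # ff='N50E011' and expects ~/.FreeCAD/geodat/AST/ASTGTM2_N50E011_dem.tif.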
bs=np.floor(b)
ls=np.floor(l)
# the ast dataset
ff="N%02dE%03d" % (int(bs),int(ls))
fn=FreeCAD.ConfigGet("UserAppData") +'/geodat/AST/ASTGTM2_' + ff +'_dem.tif'
print(fn)
'''
fn='/home/microelly2/FCB/b217_heightmaps/tandemx_daten/Chile-Chuquicatmata.tif'
b=-22.3054705
l=-68.9259643
bs=np.floor(b)
ls=np.floor(l)
print(fn)
'''
dataset = gdal.Open(fn, GA_ReadOnly)
if dataset == None:
msg="\nProblem cannot open " + fn + "\n"
FreeCAD.Console.PrintError(msg)
errorDialog(msg)
return
cols=dataset.RasterXSize
rows=dataset.RasterYSize
geotransform = dataset.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
band = dataset.GetRasterBand(1)
data = band.ReadAsArray(0, 0, cols, rows)
#data.shape -> 3601 x 3601 secs
# erfurt 51,11
#data[0,0]
# zeitz 51,12
#data[3600,0]
# windischletten(zapfendorf) 50,11
#data[0,3600]
# troestau fichtelgebirge 50,12
#data[3600,3600]
px=int(round((bs+1-b)*3600))
py=int(round((l-ls)*3600))
pts=[]
d=70
d1=20
d2=50
d1=d
d2=d
tm=TransverseMercator()
tm.lat=b
tm.lon=l
center=tm.fromGeographic(tm.lat,tm.lon)
z0= data[px,py] # relative height to origin px,py
for x in range(px-d1,px+d1):
for y in range(py-d2,py+d2):
ll=tm.fromGeographic(bs+1-1.0/3600*x,ls+1.0/3600*y)
pt=FreeCAD.Vector(ll[0]-center[0],ll[1]-center[1], 1000.0* (data[x,y]-z0))
pts.append(pt)
# display the point cloud
p=Points.Points(pts)
Points.show(p)
return pts
s6='''
MainWindow:
VerticalLayout:
id:'main'
# setFixedHeight: 600
setFixedWidth: 600
move: PySide.QtCore.QPoint(3000,100)
QtGui.QLabel:
setText:"C O N F I G U R A T I O N"
QtGui.QLabel:
QtGui.QLineEdit:
id: 'bl'
# zeyerner wand **
#(50.2570152,11.3818337)
# outdoor inn *
#(50.3737109,11.1891891)
# roethen **
#(50.3902794,11.157629)
# kreuzung huettengrund nach judenbach ***
#(50.368209,11.2016135)
setText:"50.368209,11.2016135"
# coburg zentrum
setText:"50.2639926,10.9686946"
QtGui.QPushButton:
setText: "Create height models"
clicked.connect: app.runbl
QtGui.QPushButton:
setText: "show Map"
clicked.connect: app.showMap
'''
## the gui backend
class MyApp(object):
## create the height model
def runbl(self):
bl=self.root.ids['bl'].text()
spli=bl.split(',')
b=float(spli[0])
l=float(spli[1])
s=15
import_heights(float(b),float(l),float(s))
    ## display the location in openstreetmap
def showMap(self):
bl=self.root.ids['bl'].text()
spli=bl.split(',')
b=float(spli[0])
l=float(spli[1])
s=15
WebGui.openBrowser( "http://www.openstreetmap.org/#map=16/"+str(b)+'/'+str(l))
## the dialog to import a gdal file
def mydialog():
'''the dialog to import a gdal file'''
app=MyApp()
import geodat
import geodat.miki as gmiki
miki=gmiki.Miki()
miki.app=app
app.root=miki
miki.run(s6)
FreeCAD.mm=miki
return miki
## import heights using import_xyz
def import_heights(b,l,s):
ts=time.time()
pcl=import_ast(b,l)
pts=pcl
ff="N" + str(b) + " E" + str(l)
nurbs=geodat.import_xyz.suv2(ff,pts,u=0,v=0,d=140,la=140,lb=140)
te=time.time()
print ("time to create models:",te-ts)
fn=geodat.geodat_lib.genSizeImage(size=512)
# geodat.geodat_lib.addImageTexture(nurbs,fn,scale=(8,3))
nurbs.ViewObject.Selectable = False
## test start and hide the dialog
def runtest():
m=mydialog()
m.objects[0].hide()
if __name__ == '__main__':
runtest()
def importASTER():
mydialog()
| lgpl-3.0 | -4,922,200,730,300,529,000 | 20.520661 | 256 | 0.679724 | false |
thombashi/pytablewriter | test/writer/text/test_html_writer.py | 1 | 14391 | """
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from textwrap import dedent
import pytest
import pytablewriter
from pytablewriter.style import Style
from ..._common import print_test_result
from ...data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
normal_test_data_list = [
Data(
table="",
indent=" ",
header=headers,
value=value_matrix,
expected="""<table>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">1</td>
<td align="right">123.1</td>
<td align="left">a</td>
<td align="right">1.0</td>
<td align="right">1</td>
</tr>
<tr>
<td align="right">2</td>
<td align="right">2.2</td>
<td align="left">bb</td>
<td align="right">2.2</td>
<td align="right">2.2</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.3</td>
<td align="left">ccc</td>
<td align="right">3.0</td>
<td align="left">cccc</td>
</tr>
</tbody>
</table>
""",
),
Data(
table=None,
indent=" ",
header=None,
value=value_matrix,
expected="""<table>
<tbody>
<tr>
<td align="right">1</td>
<td align="right">123.1</td>
<td align="left">a</td>
<td align="right">1.0</td>
<td align="right">1</td>
</tr>
<tr>
<td align="right">2</td>
<td align="right">2.2</td>
<td align="left">bb</td>
<td align="right">2.2</td>
<td align="right">2.2</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.3</td>
<td align="left">ccc</td>
<td align="right">3.0</td>
<td align="left">cccc</td>
</tr>
</tbody>
</table>
""",
),
Data(
table="tablename",
indent=" ",
header=headers,
value=[],
expected="""<table id="tablename">
<caption>tablename</caption>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody></tbody>
</table>
""",
),
Data(
table=None,
indent=" ",
header=headers,
value=None,
expected="""<table>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody></tbody>
</table>
""",
),
Data(
table="",
indent=" ",
header=headers,
value=value_matrix_with_none,
expected="""<table>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">1</td>
<td align="right"></td>
<td align="left">a</td>
<td align="right">1.0</td>
<td align="left"></td>
</tr>
<tr>
<td align="right"></td>
<td align="right">2.2</td>
<td align="left"></td>
<td align="right">2.2</td>
<td align="right">2.2</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.3</td>
<td align="left">ccc</td>
<td align="right"></td>
<td align="left">cccc</td>
</tr>
<tr>
<td align="right"></td>
<td align="right"></td>
<td align="left"></td>
<td align="right"></td>
<td align="left"></td>
</tr>
</tbody>
</table>
""",
),
Data(
table="tablename",
indent=" ",
header=mix_header_list,
value=mix_value_matrix,
expected="""<table id="tablename">
<caption>tablename</caption>
<thead>
<tr>
<th>i</th>
<th>f</th>
<th>c</th>
<th>if</th>
<th>ifc</th>
<th>bool</th>
<th>inf</th>
<th>nan</th>
<th>mix_num</th>
<th>time</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">1</td>
<td align="right">1.10</td>
<td align="left">aa</td>
<td align="right">1.0</td>
<td align="right">1</td>
<td align="left">True</td>
<td align="left">Infinity</td>
<td align="left">NaN</td>
<td align="right">1</td>
<td align="left">2017-01-01T00:00:00</td>
</tr>
<tr>
<td align="right">2</td>
<td align="right">2.20</td>
<td align="left">bbb</td>
<td align="right">2.2</td>
<td align="right">2.2</td>
<td align="left">False</td>
<td align="left">Infinity</td>
<td align="left">NaN</td>
<td align="right">Infinity</td>
<td align="left">2017-01-02 03:04:05+09:00</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.33</td>
<td align="left">cccc</td>
<td align="right">-3.0</td>
<td align="left">ccc</td>
<td align="left">True</td>
<td align="left">Infinity</td>
<td align="left">NaN</td>
<td align="right">NaN</td>
<td align="left">2017-01-01T00:00:00</td>
</tr>
</tbody>
</table>
""",
),
]
table_writer_class = pytablewriter.HtmlTableWriter
class Test_HtmlTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_HtmlTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, capsys, table, indent, header, value, expected):
writer = table_writer_class(
table_name=table, indent_string=indent, headers=header, value_matrix=value
)
writer.write_table()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
assert writer.dumps() == expected
assert str(writer) == expected
def test_normal_styles(self, capsys):
writer = table_writer_class(column_styles=vut_styles)
writer.from_tabledata(vut_style_tabledata)
writer.write_table()
expected = dedent(
"""\
<table id="styletest">
<caption>style test</caption>
<thead>
<tr>
<th>none</th>
<th>empty</th>
<th>tiny</th>
<th>small</th>
<th>medium</th>
<th>large</th>
<th>null w/ bold</th>
<th>L bold</th>
<th>S italic</th>
<th>L bold italic</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">111</td>
<td align="right">111</td>
<td align="right" style="font-size:x-small">111</td>
<td align="right" style="font-size:small">111</td>
<td align="right" style="font-size:medium">111</td>
<td align="right" style="font-size:large">111</td>
<td align="left" style="font-weight:bold"></td>
<td align="right" style="font-size:large; font-weight:bold">111</td>
<td align="right" style="font-size:small; font-style:italic">111</td>
<td align="right" style="font-size:large; font-weight:bold; font-style:italic">111</td>
</tr>
<tr>
<td align="right">1234</td>
<td align="right">1234</td>
<td align="right" style="font-size:x-small">1234</td>
<td align="right" style="font-size:small">1234</td>
<td align="right" style="font-size:medium">1,234</td>
<td align="right" style="font-size:large">1 234</td>
<td align="left" style="font-weight:bold"></td>
<td align="right" style="font-size:large; font-weight:bold">1234</td>
<td align="right" style="font-size:small; font-style:italic">1234</td>
<td align="right" style="font-size:large; font-weight:bold; font-style:italic">1234</td>
</tr>
</tbody>
</table>
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
print("----- _repr_html_ -----")
out = writer._repr_html_()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_valign(self, capsys):
writer = table_writer_class(
table_name="vertical-align",
headers=[
"",
"top",
"middle",
"bottom",
"top-right",
"middle-right",
"bottom-right",
],
value_matrix=[
["te\nst", "x", "x", "x", "x", "x", "x"],
],
column_styles=[
Style(vertical_align="baseline"),
Style(vertical_align="top"),
Style(vertical_align="middle"),
Style(vertical_align="bottom"),
Style(align="right", vertical_align="top"),
Style(align="right", vertical_align="middle"),
Style(align="right", vertical_align="bottom"),
],
)
writer.write_table()
expected = """\
<table id="verticalalign">
<caption>vertical-align</caption>
<thead>
<tr>
<th></th>
<th>top</th>
<th>middle</th>
<th>bottom</th>
<th>top-right</th>
<th>middle-right</th>
<th>bottom-right</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">te<br>st</td>
<td align="left" valign="top">x</td>
<td align="left" valign="middle">x</td>
<td align="left" valign="bottom">x</td>
<td align="right" valign="top">x</td>
<td align="right" valign="middle">x</td>
<td align="right" valign="bottom">x</td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_line_breaks(self, capsys):
writer = table_writer_class(
table_name="line breaks",
headers=["a\nb", "\nc\n\nd\n", "e\r\nf"],
value_matrix=[["v1\nv1", "v2\n\nv2", "v3\r\nv3"]],
)
writer.write_table()
expected = """\
<table id="linebreaks">
<caption>line breaks</caption>
<thead>
<tr>
<th>a<br>b</th>
<th><br>c<br><br>d<br></th>
<th>e<br>f</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">v1<br>v1</td>
<td align="left">v2<br><br>v2</td>
<td align="left">v3<br>v3</td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_none_values(self, capsys):
writer = table_writer_class()
writer.table_name = "none value"
writer.headers = ["none"]
writer.value_matrix = [[None]]
writer.write_table()
expected = """\
<table id="nonevalue">
<caption>none value</caption>
<thead>
<tr>
<th>none</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left"></td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_write_css(self, capsys):
writer = table_writer_class()
writer.table_name = "Write HTML with CSS"
writer.headers = ["int"]
writer.value_matrix = [[1]]
writer.write_table(write_css=True)
expected = """\
<style type="text/css">
.Write-HTML-with-CSS-css thead th:nth-child(1) {
text-align: left;
}
.Write-HTML-with-CSS-css tbody tr:nth-child(1) td:nth-child(1) {
text-align: right;
}
</style>
<table class="Write-HTML-with-CSS-css" id="WriteHTMLwithCSS">
<caption>Write HTML with CSS</caption>
<thead>
<tr>
<th>int</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_normal_empty(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.indent_string = indent
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
class Test_HtmlTableWriter_write_table_iter:
def test_exception(self):
writer = table_writer_class()
with pytest.raises(pytablewriter.NotSupportedError):
writer.write_table_iter()
| mit | -4,953,065,116,235,393,000 | 26.781853 | 112 | 0.468765 | false |
flavoso/gerencex | gerencex/core/tests/test_view_office_tickets.py | 1 | 1342 | import datetime
from django.contrib.auth.models import User
from django.shortcuts import resolve_url as r
from django.test import TestCase
from django.utils import timezone
from gerencex.core.models import Restday, Office
class OfficeTicketsViewTest(TestCase):
def setUp(self):
self.office = Office.objects.create(
name='Terceira Diacomp',
initials='DIACOMP3'
)
User.objects.create_user('testuser', '[email protected]', 'senha123')
self.user = User.objects.get(username='testuser')
self.user.first_name = 'Ze'
self.user.last_name = 'Mane'
self.user.userdetail.office = self.office
self.user.save()
self.client.login(username='testuser', password='senha123')
self.resp = self.client.get(r('office_tickets'))
def test_get(self):
"""GET must return status code 200"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""Must use restdays.html"""
self.assertTemplateUsed(self.resp, 'office_tickets.html')
def test_html(self):
# print(self.resp.content)
contents = [
'Terceira Diacomp',
'Ze Mane'
]
for expected in contents:
with self.subTest():
self.assertContains(self.resp, expected)
| gpl-3.0 | 6,811,396,555,601,649,000 | 30.952381 | 73 | 0.629657 | false |
Livefyre/flaubert | flaubert/preprocess.py | 1 | 19265 | import nltk
import unicodedata
import regex as re
import sys
import abc
import logging
import os
import cPickle as pickle
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
from itertools import islice
from functools import partial
from nltk.corpus import stopwords
from nltk.stem import wordnet, PorterStemmer
from nltk import pos_tag
from joblib import Parallel, delayed
from pymaptools.io import write_json_line, PathArgumentParser, GzipFileType, open_gz
from flaubert.tokenize import RegexpFeatureTokenizer
from flaubert.urls import URLParser
from flaubert.conf import CONFIG
from flaubert.HTMLParser import HTMLParser, HTMLParseError
from flaubert.utils import treebank2wordnet, lru_wrap, pd_dict_iter
from flaubert.unicode_maps import EXTRA_TRANSLATE_MAP
from flaubert.punkt import PunktTrainer, PunktLanguageVars, PunktSentenceTokenizer
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
class Replacer(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def replace(self, text):
"""Calls regex's sub method with self as callable"""
@abc.abstractmethod
def replacen(self, text):
"""Calls regex's subn method with self as callable"""
class RepeatReplacer(Replacer):
"""Remove repeating characters from text
The default pattern only applies to non-decimal characters
>>> rep = RepeatReplacer(max_repeats=3)
>>> rep.replace(u"So many $100000 bills.......")
u'So many $100000 bills...'
"""
def __init__(self, pattern=u'[^\\d\\*]', max_repeats=3):
if max_repeats < 1:
raise ValueError("Invalid parameter value max_repeats={}"
.format(max_repeats))
prev = u'\\1' * max_repeats
pattern = u'(%s)' % pattern
regexp = re.compile(pattern + prev + u'+', re.UNICODE)
self.replace = partial(regexp.sub, prev)
self.replacen = partial(regexp.subn, prev)
def replace(self, text):
"""Remove repeating characters from text
Method definition needed only for abstract base class
(it is overwritten during init)
"""
pass
def replacen(self, text):
"""Remove repeating characters from text
while also returning number of substitutions made
Method definition needed only for abstract base class
(it is overwritten during init)
"""
pass
class GenericReplacer(Replacer):
__metaclass__ = abc.ABCMeta
def __init__(self, regexp):
self._re = regexp
@abc.abstractmethod
def __call__(self, match):
"""Override this to provide your own substitution method"""
def replace(self, text):
return self._re.sub(self, text)
def replacen(self, text):
return self._re.subn(self, text)
class InPlaceReplacer(GenericReplacer):
def __init__(self, replace_map=None):
if replace_map is None:
replace_map = dict()
_replacements = dict()
_regexes = list()
for idx, (key, val) in enumerate(replace_map.iteritems()):
_replacements[idx] = val
_regexes.append(u'({})'.format(key))
self._replacements = _replacements
super(InPlaceReplacer, self).__init__(re.compile(u'|'.join(_regexes), re.UNICODE | re.IGNORECASE))
def __call__(self, match):
lastindex = match.lastindex
if lastindex is None:
return u''
replacement = self._replacements[lastindex - 1]
matched_string = match.group(lastindex)
return replacement.get(matched_string.lower(), matched_string) \
if isinstance(replacement, dict) \
else replacement
class Translator(Replacer):
"""Replace certain characters
"""
def __init__(self, translate_mapping=None, translated=False):
if translated:
self._translate_map = dict((translate_mapping or {}).iteritems())
else:
self._translate_map = {ord(k): ord(v) for k, v in (translate_mapping or {}).iteritems()}
def add_inverse_map(self, inverse_mapping, translated=False):
replace_map = {}
for key, vals in (inverse_mapping or {}).iteritems():
for val in vals:
replace_map[val] = key
self.add_map(replace_map, translated=translated)
def add_map(self, mapping, translated=False):
replace_map = self._translate_map
if translated:
replace_map.update(mapping)
else:
for key, val in mapping.iteritems():
replace_map[ord(key)] = ord(val)
def replace(self, text):
"""Replace characters
"""
return text.translate(self._translate_map)
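    # Example (illustrative): Translator({u'\u2019': u"'"}).replace(u'don\u2019t')
    # returns u"don't", since the map is applied with unicode.translate().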
def replacen(self, text):
"""Replace characters
while also returning number of substitutions made
Method definition needed only for abstract base class
"""
pass
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
self.handled_starttags = []
self.handled_startendtags = []
self._new_lines = 0
def append_new_lines(self):
for _ in xrange(self._new_lines):
self.fed.append("\n")
self._new_lines = 0
def handle_data(self, data):
self.append_new_lines()
self.fed.append(data)
def handle_starttag(self, tag, attrs):
HTMLParser.handle_starttag(self, tag, attrs)
self.handled_starttags.append(tag)
if tag == u"br":
self._new_lines += 1
elif tag == u"p":
self._new_lines += 1
def handle_endtag(self, tag):
HTMLParser.handle_endtag(self, tag)
if tag == u"p":
self._new_lines += 1
def handle_startendtag(self, tag, attrs):
HTMLParser.handle_starttag(self, tag, attrs)
self.handled_startendtags.append(tag)
if tag == u"br":
self._new_lines += 1
def handle_entityref(self, name):
# Ignore HTML entities (already unescaped)
self.fed.append(u'&' + name)
def get_data(self):
self.append_new_lines()
return u''.join(self.fed)
class HTMLCleaner(object):
_remove_full_comment = partial(
(re.compile(ur"(?s)<!--(.*?)-->[\n]?", re.UNICODE)).sub, ur'\1')
_remove_partial_comment = partial(
(re.compile(ur"<!--", re.UNICODE)).sub, u"")
def __init__(self, strip_html=True, strip_html_comments=True):
self._strip_html = strip_html
self._strip_html_comments = strip_html_comments
def clean(self, html):
"""Remove HTML markup from the given string
"""
if self._strip_html_comments:
html = self._remove_full_comment(html)
html = self._remove_partial_comment(html)
if html and self._strip_html:
stripper = MLStripper()
try:
stripper.feed(html)
except HTMLParseError as err:
logging.exception(err)
else:
html = stripper.get_data().strip()
return html
def strip_html_bs(text):
"""
Use BeautifulSoup to strip off HTML but in such a way that <BR> and
<P> tags get rendered as new lines
"""
soup = BeautifulSoup(text)
fragments = []
for element in soup.recursiveChildGenerator():
if isinstance(element, basestring):
fragments.append(element.strip())
elif element.name == 'br':
fragments.append(u"\n")
elif element.name == 'p':
fragments.append(u"\n")
result = u"".join(fragments).strip()
return result
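# Rough illustration (exact whitespace depends on BeautifulSoup's parser):
#   strip_html_bs(u'one<br>two<p>three</p>')  ->  u'one\ntwo\nthree'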
class SimpleSentenceTokenizer(object):
def __init__(self, lemmatizer=None, stemmer=None, url_parser=None,
unicode_form='NFKC', nltk_stop_words="english",
sentence_tokenizer=('nltk_data', 'tokenizers/punkt/english.pickle'),
max_char_repeats=3, lru_cache_size=50000, translate_map_inv=None,
replace_map=None, html_renderer='default', add_abbrev_types=None,
del_sent_starters=None):
self._unicode_normalize = partial(unicodedata.normalize, unicode_form)
self._replace_inplace = InPlaceReplacer(replace_map).replace \
if replace_map else lambda x: x
self._tokenize = RegexpFeatureTokenizer().tokenize
self._stopwords = frozenset(stopwords.words(nltk_stop_words))
self._url_parser = url_parser
self._sentence_tokenizer, self._sentence_tokenize = \
self.load_sent_tokenizer(sentence_tokenizer, add_abbrev_types, del_sent_starters)
self.sentence_tokenizer = None
self._lemmatize = lru_wrap(lemmatizer.lemmatize, lru_cache_size) if lemmatizer else None
self._stem = stemmer.stem if stemmer else None
self._pos_tag = pos_tag
self._replace_char_repeats = \
RepeatReplacer(max_repeats=max_char_repeats).replace \
if max_char_repeats > 0 else self._identity
# translation of Unicode characters
translator = Translator(EXTRA_TRANSLATE_MAP, translated=True)
translator.add_inverse_map(translate_map_inv, translated=False)
self._replace_chars = translator.replace
if html_renderer is None:
self.strip_html = lambda x: x
elif html_renderer == u'default':
self.strip_html = HTMLCleaner().clean
elif html_renderer == u'beautifulsoup':
self.strip_html = strip_html_bs
else:
raise ValueError('Invalid parameter value given for `html_renderer`')
# tokenize a dummy string b/c lemmatizer and/or other tools can take
# a while to initialize screwing up our attempts to measure performance
self.tokenize(u"dummy string")
@staticmethod
def load_sent_tokenizer(sentence_tokenizer, add_abbrev_types=None, del_sent_starters=None):
_sentence_tokenizer = None
_sentence_tokenize = lambda x: [x]
if sentence_tokenizer is not None:
if sentence_tokenizer[0] == 'nltk_data':
punkt = nltk.data.load(sentence_tokenizer[1])
# TODO: why was the (now commented-out) line below here?
# return punkt, punkt.tokenize
return punkt, punkt.sentences_from_text
elif sentence_tokenizer[0] == 'data':
tokenizer_path = os.path.join('..', 'data', sentence_tokenizer[1])
tokenizer_path = resource_filename(__name__, tokenizer_path)
if os.path.exists(tokenizer_path):
with open_gz(tokenizer_path, 'rb') as fhandle:
try:
punkt = pickle.load(fhandle)
except EOFError:
logging.warn("Could not load tokenizer from %s", tokenizer_path)
return _sentence_tokenizer, _sentence_tokenize
if add_abbrev_types:
punkt._params.abbrev_types = punkt._params.abbrev_types | set(add_abbrev_types)
if del_sent_starters:
punkt._params.sent_starters = punkt._params.sent_starters - set(del_sent_starters)
return punkt, punkt.sentences_from_text
else:
logging.warn("Tokenizer not found at %s", tokenizer_path)
else:
raise ValueError("Invalid sentence tokenizer class")
return _sentence_tokenizer, _sentence_tokenize
@staticmethod
def _identity(arg):
return arg
def unicode_normalize(self, text):
# 1. Normalize to specific Unicode form (also replaces ellipsis with
# periods)
text = self._unicode_normalize(text)
# 2. Replace certain chars such as n- and m-dashes
text = self._replace_inplace(text)
return text
def preprocess(self, text, lowercase=True):
# 1. Remove HTML
text = self.strip_html(text)
# 2. Normalize Unicode
text = self.unicode_normalize(text)
# 3. Replace certain characters
text = self._replace_chars(text)
# 4. whiteout URLs
text = self._url_parser.whiteout_urls(text)
# 5. Lowercase
if lowercase:
text = text.lower()
# 6. Reduce repeated characters to specified number (usually 3)
text = self._replace_char_repeats(text)
return text
def word_tokenize(self, text, lowercase=True, preprocess=True, remove_stopwords=False):
# 1. Misc. preprocessing
if preprocess:
text = self.preprocess(text, lowercase=lowercase)
elif lowercase:
text = text.lower()
# 2. Tokenize
words = self._tokenize(text)
# 3. Lemmatize or stem based on POS tags
if self._lemmatize:
final_words = []
lemmatize = self._lemmatize
for word, tag in self._pos_tag(words):
wordnet_tag = treebank2wordnet(tag)
if wordnet_tag is not None:
word = lemmatize(word, pos=wordnet_tag)
final_words.append(word)
words = final_words
elif self._stem:
stem = self._stem
words = [stem(word) for word in words]
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stop_words = self._stopwords
words = [word for word in words if word not in stop_words]
# 5. Return a list of words
return words
def sentence_tokenize(self, text, preprocess=True,
remove_stopwords=False):
if preprocess:
text = self.preprocess(text, lowercase=False)
sentences = []
for raw_sentence in self._sentence_tokenize(text):
if not raw_sentence:
continue
words = self.word_tokenize(
raw_sentence, preprocess=False, lowercase=True,
remove_stopwords=remove_stopwords)
if not words:
continue
sentences.append(words)
return sentences
tokenize = word_tokenize
def train_sentence_model(self, iterator, verbose=False, show_progress=1000):
reviews = []
for idx, review in enumerate(iterator, start=1):
if show_progress and idx % show_progress == 0:
logging.info("Processing review %d", idx)
review = self.preprocess(review, lowercase=False).strip()
if not review.endswith(u'.'):
review += u'.'
reviews.append(review)
text = u'\n\n'.join(reviews)
custom_lang_vars = PunktLanguageVars
custom_lang_vars.sent_end_chars = ('.', '?', '!')
# TODO: check if we need to manually specify common abbreviations
punkt = PunktTrainer(verbose=verbose, lang_vars=custom_lang_vars())
abbrev_sent = u'Start %s end.' % u' '.join(CONFIG['tokenizer']['add_abbrev_types'])
punkt.train(abbrev_sent, finalize=False)
punkt.train(text, finalize=False)
punkt.finalize_training()
params = punkt.get_params()
if self._sentence_tokenizer:
self._sentence_tokenizer._params = params
else:
model = PunktSentenceTokenizer()
model._params = params
self._sentence_tokenizer = model
self._sentence_tokenize = model.sentences_from_tokens
def train(self, iterator, verbose=False, show_progress=1000):
self.train_sentence_model(iterator, verbose=verbose, show_progress=show_progress)
def save_sentence_model(self, output_file):
pickle.dump(self._sentence_tokenizer, output_file, protocol=pickle.HIGHEST_PROTOCOL)
def registry(key):
"""
retrieves objects given keys from config
"""
if key is None:
return None
elif key == 'wordnet':
return wordnet.WordNetLemmatizer()
elif key == 'porter':
return PorterStemmer()
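# e.g. registry('porter') returns a PorterStemmer instance and registry('wordnet')
# a WordNetLemmatizer; None or unrecognised keys fall through and yield None.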
def tokenizer_builder():
return SimpleSentenceTokenizer(
lemmatizer=registry(CONFIG['preprocess']['lemmatizer']),
stemmer=registry(CONFIG['preprocess']['stemmer']),
url_parser=URLParser(),
**CONFIG['tokenizer'])
TOKENIZER = tokenizer_builder()
def get_sentences(field, row, **kwargs):
sentences = []
text = row[field]
for sentence in TOKENIZER.sentence_tokenize(text, **kwargs):
sentences.append(sentence)
row[field] = sentences
return row
def get_words(field, row, **kwargs):
text = row[field]
words = TOKENIZER.tokenize(text, **kwargs)
row[field] = words
return row
def get_review_iterator(args):
iterator = pd_dict_iter(args.input, chunksize=1000)
if args.limit:
iterator = islice(iterator, args.limit)
return iterator
def get_mapper_method(args):
if args.sentences:
mapper = get_sentences
else:
mapper = get_words
return mapper
def run_tokenize(args):
iterator = get_review_iterator(args)
mapper = get_mapper_method(args)
write_record = partial(write_json_line, args.output)
field = args.field
if args.n_jobs == 1:
# turn off parallelism
for row in iterator:
record = mapper(field, row)
write_record(record)
else:
# enable parallellism
for record in Parallel(n_jobs=args.n_jobs, verbose=10)(
delayed(mapper)(field, row) for row in iterator):
write_record(record)
def train_sentence_tokenizer(args):
field = args.field
iterator = (obj[field] for obj in get_review_iterator(args))
TOKENIZER.train(iterator, verbose=args.verbose)
TOKENIZER.save_sentence_model(args.output)
def parse_args(args=None):
parser = PathArgumentParser()
parser.add_argument('--input', type=GzipFileType('r'), default=[sys.stdin], nargs='*',
help='Input file (in TSV format, optionally compressed)')
parser.add_argument('--field', type=str, default='review',
help='Field name (Default: review)')
parser.add_argument('--limit', type=int, default=None,
help='Only process this many lines (for testing)')
parser.add_argument('--n_jobs', type=int, default=-1,
help="Number of jobs to run")
parser.add_argument('--output', type=GzipFileType('w'), default=sys.stdout,
help='Output file')
subparsers = parser.add_subparsers()
parser_tokenize = subparsers.add_parser('tokenize')
parser_tokenize.add_argument('--sentences', action='store_true',
help='split on sentences')
parser_tokenize.set_defaults(func=run_tokenize)
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--verbose', action='store_true',
help='be verbose')
parser_train.set_defaults(func=train_sentence_tokenizer)
namespace = parser.parse_args(args)
return namespace
def run():
args = parse_args()
args.func(args)
if __name__ == "__main__":
run()
| mit | -4,700,687,090,115,815,000 | 33.649281 | 106 | 0.606956 | false |
sloede/modm | modfileparser.py | 1 | 6189 | #!/usr/bin/env python
# Modm - Modules iMproved
# Copyright (C) 2013-2014 Michael Schlottke
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# System imports
import os
import shlex
# Project imports
from env import Env,EnvVariable
from basheval import BashEval
class ModfileParser:
"""
Class to parse module files and execute commands found in them.
"""
backup_prefix = 'MODM_BACKUP_'
def __init__(self, env=Env(), basheval=BashEval()):
"""Save arguments to class and initialize list of valid commands.
Arguments:
env -- object to handle environment variables
basheval -- object to convert commands to Bash evaluation strings
"""
# Save arguments
self.env = env
self.be = basheval
# Init commands
self.commands = dict()
self.init_commands()
# Init other members
self.do_unload = False
def init_commands(self):
"""Initialize all commands that are supported in module files."""
self.commands['prepend_path'] = lambda *x: self.cmd_prepend_variable(
*x,
kind='path')
self.commands['prepend_string'] = lambda *x: self.cmd_prepend_variable(
*x,
kind='string')
self.commands['print'] = self.cmd_print
self.commands['print_load'] = lambda *x: self.cmd_print(
*x,
unload=False)
self.commands['print_unload'] = lambda *x: self.cmd_print(
*x,
load=False)
self.commands['set'] = self.cmd_set
def cmd_prepend_variable(self, name, value, kind='string'):
"""Prepend variable `name` with `value`."""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name, kind=kind)
# Prepend value (or undo prepend)
self.env.variables[name].prepend(value, undo=self.do_unload)
def cmd_append_variable(self, name, value, kind='string'):
"""Append variable `name` with `value`."""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name, kind=kind)
# Append value (or undo append)
self.env.variables[name].append(value, undo=self.do_unload)
def cmd_print(self, message, load=True, unload=True):
"""Print `message`."""
if (load and not self.do_unload) or (unload and self.do_unload):
self.be.echo(message)
def cmd_set(self, name, value):
"""Set variable `name` to `value`.
Save backup of `name` if it exists already, and restore the
original value upon unloading.
"""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name)
# Determine name of potential backup variable and create backup variable
# if it does not exist
backupname = self.backup_prefix + name
if backupname not in self.env.variables:
self.env.variables[backupname] = EnvVariable(backupname)
# If variable is to be set, check if it is already set and save backup
if not self.do_unload:
if self.env.variables[name].is_set():
self.env.variables[backupname].set_value(
self.env.variables[name].get_value())
self.env.variables[name].set_value(value)
# If variable is to be unset, check if backup variable exists and
# restore it
else:
if self.env.variables[backupname].is_set():
self.env.variables[name].set_value(
self.env.variables[backupname].get_value())
self.env.variables[backupname].unset()
else:
self.env.variables[name].unset()
def load(self, modfile):
"""Load module file `modfile`."""
self.do_unload = False
return self.parse(modfile)
def unload(self, modfile):
"""Unload module file `modfile`."""
self.do_unload = True
return self.parse(modfile)
def parse(self, modfile):
"""Parse module file `modfile` and execute commands that are found.
Return true if parsing was successful, otherwise false."""
# Return without doing anything if file is not found
if not os.path.isfile(modfile):
            return False
# Read module file
with open(modfile, 'r') as f:
lines = f.readlines()
# Try to parse each line into shell tokens or die
try:
splitlines = [shlex.split(line) for line in lines]
except Exception as e:
self.be.error("Bad syntax in module file '{mf}': {e} ({n})".format(
mf=modfile, e=e, n=type(e).__name__))
return False
        # Parse each line individually
for tokens in splitlines:
# Skip line if there were no tokens
if len(tokens) == 0:
continue
# First token is command, rest (if existing) are arguments
cmd = tokens[0]
args = tokens[1:]
# If command exists, execute it while providing the arguments from
# the file
if cmd in self.commands:
self.commands[cmd](*args)
# Return true to indicate that nothing was wrong
return True
| gpl-2.0 | 8,359,449,882,160,916,000 | 34.774566 | 80 | 0.604944 | false |
cp16net/trove | trove/tests/unittests/guestagent/test_dbaas.py | 1 | 144181 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
import subprocess
import tempfile
import time
from uuid import uuid4
from mock import ANY
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
from oslo_utils import netutils
import sqlalchemy
import testtools
from testtools.matchers import Equals
from testtools.matchers import Is
from testtools.matchers import Not
from trove.common import cfg
from trove.common.exception import BadRequest
from trove.common.exception import GuestError
from trove.common.exception import PollTimeOut
from trove.common.exception import ProcessExecutionError
from trove.common import instance as rd_instance
from trove.common import utils
from trove.conductor import api as conductor_api
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.cassandra import (
service as cass_service)
from trove.guestagent.datastore.experimental.cassandra import (
system as cass_system)
from trove.guestagent.datastore.experimental.couchbase import (
service as couchservice)
from trove.guestagent.datastore.experimental.couchdb import (
service as couchdb_service)
from trove.guestagent.datastore.experimental.db2 import (
service as db2service)
from trove.guestagent.datastore.experimental.mongodb import (
service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.datastore.experimental.redis import service as rservice
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.datastore.experimental.redis import system as RedisSystem
from trove.guestagent.datastore.experimental.vertica import (
system as vertica_system)
from trove.guestagent.datastore.experimental.vertica.service import (
VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent.datastore.mysql.service import KeepAliveConnection
from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import MySqlRootAccess
from trove.guestagent.datastore.service import BaseDbStatus
from trove.guestagent.db import models
from trove.guestagent import dbaas as dbaas_sr
from trove.guestagent.dbaas import get_filesystem_volume_stats
from trove.guestagent.dbaas import to_gb
from trove.guestagent import pkg
from trove.guestagent.volume import VolumeDevice
from trove.instance.models import InstanceServiceStatus
from trove.tests.unittests.util import util
CONF = cfg.CONF
"""
Unit tests for the classes and functions in dbaas.py.
"""
FAKE_DB = {"_name": "testDB", "_character_set": "latin2",
"_collate": "latin2_general_ci"}
FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2",
"_collate": "latin2_general_ci"}
FAKE_USER = [{"_name": "random", "_password": "guesswhat",
"_host": "%", "_databases": [FAKE_DB]}]
conductor_api.API.get_client = Mock()
conductor_api.API.heartbeat = Mock()
class FakeAppStatus(BaseDbStatus):
def __init__(self, id, status):
self.id = id
self.next_fake_status = status
def _get_actual_db_status(self):
return self.next_fake_status
def set_next_status(self, next_status):
self.next_fake_status = next_status
def _is_query_router(self):
return False
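# FakeAppStatus lets the tests below script the status reported by
# _get_actual_db_status() via set_next_status(), without touching a real
# database service.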
class DbaasTest(testtools.TestCase):
def setUp(self):
super(DbaasTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_utils_execute = dbaas.utils.execute
def tearDown(self):
super(DbaasTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.utils.execute = self.orig_utils_execute
@patch.object(operating_system, 'remove')
def test_clear_expired_password(self, mock_remove):
secret_content = ("# The random password set for the "
"root user at Wed May 14 14:06:38 2014 "
"(local time): somepassword")
with patch.object(dbaas.utils, 'execute',
return_value=(secret_content, None)):
dbaas.clear_expired_password()
self.assertEqual(2, dbaas.utils.execute.call_count)
self.assertEqual(1, mock_remove.call_count)
@patch.object(operating_system, 'remove')
def test_no_secret_content_clear_expired_password(self, mock_remove):
with patch.object(dbaas.utils, 'execute', return_value=('', None)):
dbaas.clear_expired_password()
self.assertEqual(1, dbaas.utils.execute.call_count)
mock_remove.assert_not_called()
@patch.object(operating_system, 'remove')
def test_fail_password_update_content_clear_expired_password(self,
mock_remove):
secret_content = ("# The random password set for the "
"root user at Wed May 14 14:06:38 2014 "
"(local time): somepassword")
with patch.object(dbaas.utils, 'execute',
side_effect=[(secret_content, None),
ProcessExecutionError]):
dbaas.clear_expired_password()
self.assertEqual(2, dbaas.utils.execute.call_count)
mock_remove.assert_not_called()
@patch.object(operating_system, 'remove')
@patch.object(dbaas.utils, 'execute', side_effect=ProcessExecutionError)
def test_fail_retrieve_secret_content_clear_expired_password(self,
mock_execute,
mock_remove):
dbaas.clear_expired_password()
self.assertEqual(1, mock_execute.call_count)
mock_remove.assert_not_called()
def test_get_auth_password(self):
dbaas.utils.execute_with_timeout = Mock(
return_value=("password ", None))
password = dbaas.get_auth_password()
self.assertEqual("password", password)
def test_get_auth_password_error(self):
dbaas.utils.execute_with_timeout = Mock(
return_value=("password", "Error"))
self.assertRaises(RuntimeError, dbaas.get_auth_password)
def test_service_discovery(self):
with patch.object(os.path, 'isfile', return_value=True):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.assertIsNotNone(mysql_service['cmd_start'])
self.assertIsNotNone(mysql_service['cmd_enable'])
def test_load_mysqld_options(self):
        output = "mysqld would've been started with these args:\n"\
"--user=mysql --port=3306 --basedir=/usr "\
"--tmpdir=/tmp --skip-external-locking"
with patch.object(os.path, 'isfile', return_value=True):
dbaas.utils.execute = Mock(return_value=(output, None))
options = dbaas.load_mysqld_options()
self.assertEqual(5, len(options))
self.assertEqual(["mysql"], options["user"])
self.assertEqual(["3306"], options["port"])
self.assertEqual(["/usr"], options["basedir"])
self.assertEqual(["/tmp"], options["tmpdir"])
self.assertTrue("skip-external-locking" in options)
def test_load_mysqld_options_contains_plugin_loads_options(self):
        output = ("mysqld would've been started with these args:\n"
"--plugin-load=blackhole=ha_blackhole.so "
"--plugin-load=federated=ha_federated.so")
with patch.object(os.path, 'isfile', return_value=True):
dbaas.utils.execute = Mock(return_value=(output, None))
options = dbaas.load_mysqld_options()
self.assertEqual(1, len(options))
self.assertEqual(["blackhole=ha_blackhole.so",
"federated=ha_federated.so"],
options["plugin-load"])
@patch.object(os.path, 'isfile', return_value=True)
def test_load_mysqld_options_error(self, mock_exists):
dbaas.utils.execute = Mock(side_effect=ProcessExecutionError())
self.assertFalse(dbaas.load_mysqld_options())
def test_get_datadir(self):
cnf_value = '[mysqld]\ndatadir=/var/lib/mysql/data'
with patch.object(dbaas, 'read_mycnf', Mock(return_value=cnf_value)):
self.assertEqual('/var/lib/mysql/data',
dbaas.get_datadir(reset_cache=True))
class ResultSetStub(object):
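    """Minimal SQLAlchemy result set stand-in supporting iteration and
    rowcount only.
    """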
def __init__(self, rows):
self._rows = rows
def __iter__(self):
return self._rows.__iter__()
@property
def rowcount(self):
return len(self._rows)
def __repr__(self):
return self._rows.__repr__()
class MySqlAdminMockTest(testtools.TestCase):
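    """MySqlAdmin tests that run against the fully mocked SQL connection
    built by mock_sql_connection() further down in this module.
    """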
def tearDown(self):
super(MySqlAdminMockTest, self).tearDown()
def test_list_databases(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute',
return_value=ResultSetStub(
[('db1', 'utf8', 'utf8_bin'),
('db2', 'utf8', 'utf8_bin'),
('db3', 'utf8', 'utf8_bin')])):
databases, next_marker = MySqlAdmin().list_databases(limit=10)
self.assertThat(next_marker, Is(None))
self.assertThat(len(databases), Is(3))
class MySqlAdminTest(testtools.TestCase):
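    """MySqlAdmin tests that replace dbaas.LocalSqlClient with a Mock so
    every issued SQL statement can be inspected via execute.call_args_list.
    """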
def setUp(self):
super(MySqlAdminTest, self).setUp()
self.orig_get_engine = dbaas.get_engine
self.orig_LocalSqlClient = dbaas.LocalSqlClient
self.orig_LocalSqlClient_enter = dbaas.LocalSqlClient.__enter__
self.orig_LocalSqlClient_exit = dbaas.LocalSqlClient.__exit__
self.orig_LocalSqlClient_execute = dbaas.LocalSqlClient.execute
self.orig_MySQLUser_is_valid_user_name = (
models.MySQLUser._is_valid_user_name)
dbaas.get_engine = MagicMock(name='get_engine')
dbaas.LocalSqlClient = Mock
dbaas.LocalSqlClient.__enter__ = Mock()
dbaas.LocalSqlClient.__exit__ = Mock()
dbaas.LocalSqlClient.execute = Mock()
self.mySqlAdmin = MySqlAdmin()
def tearDown(self):
super(MySqlAdminTest, self).tearDown()
dbaas.get_engine = self.orig_get_engine
dbaas.LocalSqlClient = self.orig_LocalSqlClient
dbaas.LocalSqlClient.__enter__ = self.orig_LocalSqlClient_enter
dbaas.LocalSqlClient.__exit__ = self.orig_LocalSqlClient_exit
dbaas.LocalSqlClient.execute = self.orig_LocalSqlClient_execute
models.MySQLUser._is_valid_user_name = (
self.orig_MySQLUser_is_valid_user_name)
def test__associate_dbs(self):
db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
{"grantee": "'test_user'@'%'", "table_schema": "db2"},
{"grantee": "'test_user'@'%'", "table_schema": "db3"},
{"grantee": "'test_user1'@'%'", "table_schema": "db1"},
{"grantee": "'test_user1'@'%'", "table_schema": "db3"}]
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.databases = []
expected = ("SELECT grantee, table_schema FROM "
"information_schema.SCHEMA_PRIVILEGES WHERE privilege_type"
" != 'USAGE' GROUP BY grantee, table_schema;")
with patch.object(dbaas.LocalSqlClient, 'execute',
Mock(return_value=db_result)):
self.mySqlAdmin._associate_dbs(user)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
self.assertEqual(3, len(user.databases))
self.assertEqual(expected, args[0].text,
"Associate database queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_change_passwords(self):
user = [{"name": "test_user", "host": "%", "password": "password"}]
self.mySqlAdmin.change_passwords(user)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("UPDATE mysql.user SET Password="
"PASSWORD('password') WHERE User = 'test_user' "
"AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Change password queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_password(self):
db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
{"grantee": "'test_user'@'%'", "table_schema": "db2"}]
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"password": "password"}
with patch.object(dbaas.LocalSqlClient, 'execute',
Mock(return_value=db_result)):
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%',
user_attrs)
self.assertEqual(0,
self.mySqlAdmin.grant_access.call_count)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET Password="
"PASSWORD('password') WHERE User = 'test_user' "
"AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_name(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"name": "new_name"}
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%', user_attrs)
self.mySqlAdmin.grant_access.assert_called_with(
'new_name', '%', set([]))
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET User='new_name' "
"WHERE User = 'test_user' AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_host(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"host": "new_host"}
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%', user_attrs)
self.mySqlAdmin.grant_access.assert_called_with(
'test_user', 'new_host', set([]))
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET Host='new_host' "
"WHERE User = 'test_user' AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_create_database(self):
databases = []
databases.append(FAKE_DB)
self.mySqlAdmin.create_database(databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
self.assertEqual(1, dbaas.LocalSqlClient.execute.call_count,
"The client object was not called exactly once, " +
"it was called %d times"
% dbaas.LocalSqlClient.execute.call_count)
def test_create_database_more_than_1(self):
databases = []
databases.append(FAKE_DB)
databases.append(FAKE_DB_2)
self.mySqlAdmin.create_database(databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB2` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count,
"The client object was not called exactly twice, " +
"it was called %d times"
% dbaas.LocalSqlClient.execute.call_count)
def test_create_database_no_db(self):
databases = []
self.mySqlAdmin.create_database(databases)
self.assertFalse(dbaas.LocalSqlClient.execute.called,
"The client object was called when it wasn't " +
"supposed to")
def test_delete_database(self):
database = {"_name": "testDB"}
self.mySqlAdmin.delete_database(database)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = "DROP DATABASE `testDB`;"
self.assertEqual(expected, args[0].text,
"Delete database queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_delete_user(self):
user = {"_name": "testUser", "_host": None}
self.mySqlAdmin.delete_user(user)
        # call_args can be None here, so only compare the statement text
        # when it is available.
call_args = dbaas.LocalSqlClient.execute.call_args
if call_args is not None:
args, _ = call_args
expected = "DROP USER `testUser`@`%`;"
self.assertEqual(expected, args[0].text,
"Delete user queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_create_user(self):
self.mySqlAdmin.create_user(FAKE_USER)
access_grants_expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO "
"`random`@`%` IDENTIFIED BY 'guesswhat';")
create_user_expected = ("GRANT USAGE ON *.* TO `random`@`%` "
"IDENTIFIED BY 'guesswhat';")
create_user, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
self.assertEqual(create_user_expected, create_user[0].text,
"Create user queries are not the same")
access_grants, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
self.assertEqual(access_grants_expected, access_grants[0].text,
"Create user queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count)
def test_list_databases(self):
self.mySqlAdmin.list_databases()
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
def test_list_databases_with_limit(self):
limit = 2
self.mySqlAdmin.list_databases(limit)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertTrue("LIMIT " + str(limit + 1) in args[0].text)
def test_list_databases_with_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_databases(marker=marker)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND schema_name > '" + marker + "'" in args[0].text)
def test_list_databases_with_include_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_databases(marker=marker, include_marker=True)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue(("AND schema_name >= '%s'" % marker) in args[0].text)
def test_list_users(self):
self.mySqlAdmin.list_users()
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertFalse("AND Marker > '" in args[0].text)
def test_list_users_with_limit(self):
limit = 2
self.mySqlAdmin.list_users(limit)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
("LIMIT " + str(limit + 1)),
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
def test_list_users_with_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_users(marker=marker)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host, Marker",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND Marker > '" + marker + "'" in args[0].text)
def test_list_users_with_include_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_users(marker=marker, include_marker=True)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND Marker >= '" + marker + "'" in args[0].text)
@patch.object(dbaas.MySqlAdmin, '_associate_dbs')
    def test_get_user(self, mock_associate_dbs):
        """
        Unit test for MySqlAdmin.get_user.

        Verifies that the SQL query built by get_user matches the expected
        statement.
        """
username = "user1"
hostname = "%"
user = [{"User": "user1", "Host": "%", 'Password': 'some_thing'}]
dbaas.LocalSqlClient.execute.return_value.fetchall = Mock(
return_value=user)
self.mySqlAdmin.get_user(username, hostname)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost' AND User = 'user1'",
"ORDER BY User, Host",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertEqual(1, mock_associate_dbs.call_count)
def test_fail_get_user(self):
username = "os_admin"
hostname = "host"
self.assertRaisesRegexp(BadRequest, "Username os_admin is not valid",
self.mySqlAdmin.get_user, username, hostname)
def test_grant_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['db1']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.grant_access('test_user', '%', databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("GRANT ALL PRIVILEGES ON `db1`.* TO `test_user`@`%` "
"IDENTIFIED BY PASSWORD 'some_password';")
self.assertEqual(expected, args[0].text,
"Grant access queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_fail_grant_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['mysql']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.grant_access('test_user', '%', databases)
            # 'mysql' is a system database that access must not be granted
            # to, so grant_access() should never reach execute().
dbaas.LocalSqlClient.execute.assert_not_called()
def test_is_root_enabled(self):
self.mySqlAdmin.is_root_enabled()
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("SELECT User FROM mysql.user WHERE "
"User = 'root' AND Host != 'localhost';")
self.assertEqual(expected, args[0].text,
"Find root enabled queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_revoke_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['db1']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.revoke_access('test_usr', '%', databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("REVOKE ALL ON `['db1']`.* FROM `test_user`@`%`;")
self.assertEqual(expected, args[0].text,
"Revoke access queries are not the same")
def test_list_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.databases = ['db1', 'db2']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
databases = self.mySqlAdmin.list_access('test_usr', '%')
self.assertEqual(2, len(databases),
"List access queries are not the same")
class MySqlAppTest(testtools.TestCase):
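    """MySqlApp lifecycle tests (start/stop/restart, configuration writes,
    replication helpers) with service discovery, time.sleep and os.unlink
    mocked out.
    """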
def setUp(self):
super(MySqlAppTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_time_sleep = time.sleep
self.orig_unlink = os.unlink
self.orig_get_auth_password = dbaas.get_auth_password
self.orig_service_discovery = operating_system.service_discovery
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mySqlApp = MySqlApp(self.appStatus)
mysql_service = {'cmd_start': Mock(),
'cmd_stop': Mock(),
'cmd_enable': Mock(),
'cmd_disable': Mock(),
'bin': Mock()}
operating_system.service_discovery = Mock(
return_value=mysql_service)
time.sleep = Mock()
os.unlink = Mock()
dbaas.get_auth_password = Mock()
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
time.sleep = self.orig_time_sleep
os.unlink = self.orig_unlink
operating_system.service_discovery = self.orig_service_discovery
dbaas.get_auth_password = self.orig_get_auth_password
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def mysql_starts_successfully(self):
def start(update_db=False):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql.side_effect = start
def mysql_starts_unsuccessfully(self):
def start():
raise RuntimeError("MySQL failed to start!")
self.mySqlApp.start_mysql.side_effect = start
def mysql_stops_successfully(self):
def stop():
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db.side_effect = stop
def mysql_stops_unsuccessfully(self):
def stop():
raise RuntimeError("MySQL failed to stop!")
self.mySqlApp.stop_db.side_effect = stop
def test_stop_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
@patch.object(utils, 'execute_with_timeout')
def test_stop_mysql_do_not_start_on_reboot(self, mock_execute):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True, True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
self.assertEqual(2, mock_execute.call_count)
def test_stop_mysql_error(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mySqlApp.stop_db)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_stop_mysql_key_error(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp.stop_db)
self.assertEqual(0, mock_execute.call_count)
def test_restart_is_successful(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
self.mySqlApp.restart()
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_restart_mysql_wont_start_up(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_unsuccessfully()
self.mysql_starts_unsuccessfully()
self.assertRaises(RuntimeError, self.mySqlApp.restart)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_wipe_ib_logfiles_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError,
self.mySqlApp.wipe_ib_logfiles)
def test_start_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.start_mysql()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql(update_db=True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_start_mysql_runs_forever(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_mysql_error(self):
self.mySqlApp._enable_mysql_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
def test_start_db_with_conf_changes(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_starts_successfully()
self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.mySqlApp.start_db_with_conf_changes(Mock())
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertEqual(rd_instance.ServiceStatuses.RUNNING,
self.appStatus._get_actual_db_status())
def test_start_db_with_conf_changes_mysql_is_running(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mySqlApp.start_db_with_conf_changes,
Mock())
def test_remove_overrides(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError, self.mySqlApp.start_mysql)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'remove')
@patch.object(dbaas, 'get_auth_password', return_value='some_password')
@patch.object(dbaas.MySqlApp, '_write_config_overrides')
def test_reset_configuration(self, mock_write_overrides,
mock_get_auth_password, mock_remove,
mock_move):
configuration = {'config_contents': 'some junk'}
self.mySqlApp.reset_configuration(configuration=configuration)
self.assertEqual(1, mock_get_auth_password.call_count)
self.assertEqual(2, mock_move.call_count)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(0, mock_write_overrides.call_count)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'remove')
@patch.object(dbaas.MySqlApp, '_write_config_overrides')
def test__write_mycnf(self, mock_write_overrides, mock_remove, mock_move):
self.mySqlApp._write_mycnf('some_password', 'some junk', 'something')
self.assertEqual(2, mock_move.call_count)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(1, mock_write_overrides.call_count)
def test_mysql_error_in_write_config_verify_unlink(self):
configuration = {'config_contents': 'some junk'}
dbaas.utils.execute_with_timeout = (
Mock(side_effect=ProcessExecutionError('something')))
self.assertRaises(ProcessExecutionError,
self.mySqlApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, dbaas.utils.execute_with_timeout.call_count)
self.assertEqual(1, os.unlink.call_count)
self.assertEqual(1, dbaas.get_auth_password.call_count)
def test_mysql_error_in_write_config(self):
configuration = {'config_contents': 'some junk'}
dbaas.utils.execute_with_timeout = (
Mock(side_effect=ProcessExecutionError('something')))
self.assertRaises(ProcessExecutionError,
self.mySqlApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, dbaas.utils.execute_with_timeout.call_count)
self.assertEqual(1, dbaas.get_auth_password.call_count)
@patch.object(utils, 'execute_with_timeout')
def test__enable_mysql_on_boot(self, mock_execute):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.mySqlApp._enable_mysql_on_boot()
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(mysql_service['cmd_enable'],
shell=True)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._enable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(utils, 'execute_with_timeout')
def test__disable_mysql_on_boot(self, mock_execute):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.mySqlApp._disable_mysql_on_boot()
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(mysql_service['cmd_disable'],
shell=True)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._disable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'chmod')
@patch.object(utils, 'execute_with_timeout')
def test_update_overrides(self, mock_execute, mock_chmod, mock_move):
override_value = 'something'
self.mySqlApp.update_overrides(override_value)
with open(dbaas.MYCNF_OVERRIDES_TMP, 'r') as test_file:
test_data = test_file.read()
self.assertEqual(override_value, test_data)
mock_chmod.assert_called_with(dbaas.MYCNF_OVERRIDES,
dbaas.FileMode.SET_GRP_RW_OTH_R,
as_root=True)
mock_move.assert_called_with(dbaas.MYCNF_OVERRIDES_TMP,
dbaas.MYCNF_OVERRIDES, as_root=True)
# Remove the residual file
os.remove(dbaas.MYCNF_OVERRIDES_TMP)
@patch.object(os.path, 'exists', return_value=True)
@patch.object(operating_system, 'remove')
def test_remove_override(self, mock_remove, mock_exists):
self.mySqlApp.remove_overrides()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(1, mock_exists.call_count)
mock_remove.assert_called_once_with(ANY, as_root=True)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'chmod')
def test_write_replication_source_overrides(self, mock_chmod, mock_move):
self.mySqlApp.write_replication_source_overrides('something')
self.assertEqual(1, mock_move.call_count)
self.assertEqual(1, mock_chmod.call_count)
@patch.object(dbaas.MySqlApp, '_write_replication_overrides')
def test_write_replication_replica_overrides(self, mock_write_overrides):
self.mySqlApp.write_replication_replica_overrides('something')
self.assertEqual(1, mock_write_overrides.call_count)
@patch.object(os.path, 'exists', return_value=True)
@patch.object(operating_system, 'remove')
    def test_remove_replication_source_overrides(self, mock_remove,
                                                 mock_exists):
self.mySqlApp.remove_replication_source_overrides()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(1, mock_exists.call_count)
@patch.object(dbaas.MySqlApp, '_remove_replication_overrides')
def test_remove_replication_replica_overrides(self, mock_remove_overrides):
self.mySqlApp.remove_replication_replica_overrides()
self.assertEqual(1, mock_remove_overrides.call_count)
@patch.object(os.path, 'exists', return_value=True)
def test_exists_replication_source_overrides(self, mock_exists):
self.assertTrue(self.mySqlApp.exists_replication_source_overrides())
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_grant_replication_privilege(self, *args):
replication_user = {'name': 'testUSr', 'password': 'somePwd'}
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.grant_replication_privilege(replication_user)
args, _ = self.mock_execute.call_args_list[0]
expected = ("GRANT REPLICATION SLAVE ON *.* TO `testUSr`@`%` "
"IDENTIFIED BY 'somePwd';")
self.assertEqual(expected, args[0].text,
"Replication grant statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_port(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.get_port()
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@port")
self.assertEqual(expected, args[0],
"Port queries are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_binlog_position(self, *args):
result = {'File': 'mysql-bin.003', 'Position': '73'}
self.mock_execute.return_value.first = Mock(return_value=result)
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
found_result = self.mySqlApp.get_binlog_position()
self.assertEqual(result['File'], found_result['log_file'])
self.assertEqual(result['Position'], found_result['position'])
args, _ = self.mock_execute.call_args_list[0]
expected = ("SHOW MASTER STATUS")
self.assertEqual(expected, args[0],
"Master status queries are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_execute_on_client(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.execute_on_client('show tables')
args, _ = self.mock_execute.call_args_list[0]
expected = ("show tables")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_start_slave(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.start_slave()
args, _ = self.mock_execute.call_args_list[0]
expected = ("START SLAVE")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_stop_slave_with_failover(self, *args):
self.mock_execute.return_value.first = Mock(
return_value={'Master_User': 'root'})
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.stop_slave(True)
self.assertEqual('root', result['replication_user'])
expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL"]
self.assertEqual(len(expected), len(self.mock_execute.call_args_list))
for i in range(len(self.mock_execute.call_args_list)):
args, _ = self.mock_execute.call_args_list[i]
self.assertEqual(expected[i], args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_stop_slave_without_failover(self, *args):
self.mock_execute.return_value.first = Mock(
return_value={'Master_User': 'root'})
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.stop_slave(False)
self.assertEqual('root', result['replication_user'])
expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL",
"DROP USER root"]
self.assertEqual(len(expected), len(self.mock_execute.call_args_list))
for i in range(len(self.mock_execute.call_args_list)):
args, _ = self.mock_execute.call_args_list[i]
self.assertEqual(expected[i], args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_stop_master(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.stop_master()
args, _ = self.mock_execute.call_args_list[0]
expected = ("RESET MASTER")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__wait_for_slave_status(self, *args):
mock_client = Mock()
mock_client.execute = Mock()
result = ['Slave_running', 'on']
mock_client.execute.return_value.first = Mock(return_value=result)
self.mySqlApp._wait_for_slave_status('ON', mock_client, 5)
args, _ = mock_client.execute.call_args_list[0]
expected = ("SHOW GLOBAL STATUS like 'slave_running'")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(utils, 'poll_until', side_effect=PollTimeOut)
def test_fail__wait_for_slave_status(self, *args):
self.assertRaisesRegexp(RuntimeError,
"Replication is not on after 5 seconds.",
self.mySqlApp._wait_for_slave_status, 'ON',
Mock(), 5)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__get_slave_status(self, *args):
self.mock_execute.return_value.first = Mock(return_value='some_thing')
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp._get_slave_status()
self.assertEqual('some_thing', result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SHOW SLAVE STATUS")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_latest_txn_id(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value=['some_thing'])
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.get_latest_txn_id()
self.assertEqual('some_thing', result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@global.gtid_executed")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_wait_for_txn(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.wait_for_txn('abcd')
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('abcd')")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_txn_count(self, *args):
self.mock_execute.return_value.first = Mock(
return_value=['b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1'])
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.get_txn_count()
self.assertEqual(1, result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@global.gtid_executed")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
class MySqlAppInstallTest(MySqlAppTest):
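    """Adds package installation, secure() and configuration override
    scenarios on top of MySqlAppTest.
    """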
def setUp(self):
super(MySqlAppInstallTest, self).setUp()
self.orig_create_engine = sqlalchemy.create_engine
self.orig_pkg_version = dbaas.packager.pkg_version
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppInstallTest, self).tearDown()
sqlalchemy.create_engine = self.orig_create_engine
dbaas.packager.pkg_version = self.orig_pkg_version
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_install(self):
self.mySqlApp._install_mysql = Mock()
pkg.Package.pkg_is_installed = Mock(return_value=False)
utils.execute_with_timeout = Mock()
pkg.Package.pkg_install = Mock()
self.mySqlApp._clear_mysql_config = Mock()
self.mySqlApp._create_mysql_confd_dir = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.install_if_needed(["package"])
self.assertTrue(pkg.Package.pkg_install.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_secure(self):
dbaas.clear_expired_password = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
sqlalchemy.create_engine = Mock()
self.mySqlApp.secure('contents', None)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(utils, 'generate_random_password',
return_value='some_password')
def test_secure_root(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.secure_root()
update_root_password, _ = self.mock_execute.call_args_list[0]
update_expected = ("UPDATE mysql.user SET Password="
"PASSWORD('some_password') "
"WHERE User = 'root' AND Host = 'localhost';")
remove_root, _ = self.mock_execute.call_args_list[1]
remove_expected = ("DELETE FROM mysql.user WHERE "
"User = 'root' AND Host != 'localhost';")
self.assertEqual(update_expected, update_root_password[0].text,
"Update root password queries are not the same")
self.assertEqual(remove_expected, remove_root[0].text,
"Remove root queries are not the same")
@patch.object(operating_system, 'create_directory')
def test__create_mysql_confd_dir(self, mkdir_mock):
self.mySqlApp._create_mysql_confd_dir()
mkdir_mock.assert_called_once_with('/etc/mysql/conf.d', as_root=True)
@patch.object(operating_system, 'move')
def test__clear_mysql_config(self, mock_move):
self.mySqlApp._clear_mysql_config()
self.assertEqual(3, mock_move.call_count)
@patch.object(operating_system, 'move', side_effect=ProcessExecutionError)
def test_exception__clear_mysql_config(self, mock_move):
self.mySqlApp._clear_mysql_config()
        # The call count matches the success case because each exception is
        # swallowed and the flow moves on to the next file move.
self.assertEqual(3, mock_move.call_count)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_apply_overrides(self, *args):
overrides = {'sort_buffer_size': 1000000}
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.apply_overrides(overrides)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SET GLOBAL sort_buffer_size=1000000")
self.assertEqual(expected, args[0].text,
"Set global statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_make_read_only(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.make_read_only('ON')
args, _ = self.mock_execute.call_args_list[0]
expected = ("set global read_only = ON")
self.assertEqual(expected, args[0].text,
"Set read_only statements are not the same")
def test_install_install_error(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
pkg.Package.pkg_is_installed = Mock(return_value=False)
self.mySqlApp._clear_mysql_config = Mock()
self.mySqlApp._create_mysql_confd_dir = Mock()
pkg.Package.pkg_install = \
Mock(side_effect=pkg.PkgPackageStateError("Install error"))
self.assertRaises(pkg.PkgPackageStateError,
self.mySqlApp.install_if_needed, ["package"])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_secure_write_conf_error(self):
dbaas.clear_expired_password = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mySqlApp._write_mycnf = Mock(
side_effect=IOError("Could not write file"))
self.mysql_stops_successfully()
self.mysql_starts_successfully()
sqlalchemy.create_engine = Mock()
self.assertRaises(IOError, self.mySqlApp.secure, "foo", None)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class TextClauseMatcher(object):
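    """Equality helper for mock assertions: matches any SQLAlchemy text
    clause whose .text contains the given substring.
    """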
def __init__(self, text):
self.text = text
def __repr__(self):
return "TextClause(%s)" % self.text
def __eq__(self, arg):
print("Matching %s" % arg.text)
return self.text in arg.text
def mock_sql_connection():
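    """Stub out execute_with_timeout, sqlalchemy.create_engine and the
    LocalSqlClient context manager; return the mocked connection used by
    the code under test.
    """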
utils.execute_with_timeout = MagicMock(return_value=['fake_password',
None])
mock_engine = MagicMock()
sqlalchemy.create_engine = MagicMock(return_value=mock_engine)
mock_conn = MagicMock()
dbaas.LocalSqlClient.__enter__ = MagicMock(return_value=mock_conn)
dbaas.LocalSqlClient.__exit__ = MagicMock(return_value=None)
return mock_conn
class MySqlAppMockTest(testtools.TestCase):
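    """MySqlApp.secure() tests run against the mocked connection with the
    my.cnf file writes skipped.
    """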
def setUp(self):
super(MySqlAppMockTest, self).setUp()
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
def tearDown(self):
super(MySqlAppMockTest, self).tearDown()
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_secure_keep_root(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
utils.execute_with_timeout = MagicMock(return_value=None)
# skip writing the file for now
with patch.object(os.path, 'isfile', return_value=False):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
dbaas.clear_expired_password = MagicMock(return_value=None)
app = MySqlApp(mock_status)
app._write_mycnf = MagicMock(return_value=True)
app.start_mysql = MagicMock(return_value=None)
app.stop_db = MagicMock(return_value=None)
app.secure('foo', None)
self.assertTrue(mock_conn.execute.called)
def test_secure_with_mycnf_error(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_stop': 'service mysql stop'}):
utils.execute_with_timeout = MagicMock(return_value=None)
# skip writing the file for now
with patch.object(os.path, 'isfile', return_value=False):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
dbaas.clear_expired_password = MagicMock(return_value=None)
app = MySqlApp(mock_status)
dbaas.clear_expired_password = MagicMock(return_value=None)
self.assertRaises(TypeError, app.secure, None, None)
self.assertTrue(mock_conn.execute.called)
# At least called twice
self.assertTrue(mock_conn.execute.call_count >= 2)
(mock_status.wait_for_real_status_to_change_to.
assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN,
app.state_change_wait_time, False))
class MySqlRootStatusTest(testtools.TestCase):
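    """Tests for the MySqlRootAccess root-enablement checks and the
    statements issued when root access is granted.
    """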
def setUp(self):
super(MySqlRootStatusTest, self).setUp()
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
def tearDown(self):
super(MySqlRootStatusTest, self).tearDown()
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_root_is_enabled(self):
mock_conn = mock_sql_connection()
mock_rs = MagicMock()
mock_rs.rowcount = 1
with patch.object(mock_conn, 'execute', return_value=mock_rs):
self.assertThat(MySqlRootAccess().is_root_enabled(), Is(True))
def test_root_is_not_enabled(self):
mock_conn = mock_sql_connection()
mock_rs = MagicMock()
mock_rs.rowcount = 0
with patch.object(mock_conn, 'execute', return_value=mock_rs):
self.assertThat(MySqlRootAccess.is_root_enabled(), Equals(False))
def test_enable_root(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
# invocation
user_ser = MySqlRootAccess.enable_root()
# verification
self.assertThat(user_ser, Not(Is(None)))
mock_conn.execute.assert_any_call(TextClauseMatcher('CREATE USER'),
user='root', host='%')
mock_conn.execute.assert_any_call(TextClauseMatcher(
'GRANT ALL PRIVILEGES ON *.*'))
mock_conn.execute.assert_any_call(TextClauseMatcher(
'UPDATE mysql.user'))
def test_enable_root_failed(self):
with patch.object(models.MySQLUser, '_is_valid_user_name',
return_value=False):
self.assertRaises(ValueError, MySqlAdmin().enable_root)
class MockStats:
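    """Fake os.statvfs() result: 1024**2 blocks of 4 KiB (4 GiB total) with
    half of them free.
    """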
f_blocks = 1024 ** 2
f_bsize = 4096
f_bfree = 512 * 1024
class InterrogatorTest(testtools.TestCase):
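    """Tests for the volume helpers to_gb() and
    get_filesystem_volume_stats().
    """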
def tearDown(self):
super(InterrogatorTest, self).tearDown()
def test_to_gb(self):
result = to_gb(123456789)
self.assertEqual(0.11, result)
def test_to_gb_zero(self):
result = to_gb(0)
self.assertEqual(0.0, result)
def test_get_filesystem_volume_stats(self):
with patch.object(os, 'statvfs', return_value=MockStats):
result = get_filesystem_volume_stats('/some/path/')
self.assertEqual(4096, result['block_size'])
self.assertEqual(1048576, result['total_blocks'])
self.assertEqual(524288, result['free_blocks'])
self.assertEqual(4.0, result['total'])
self.assertEqual(2147483648, result['free'])
self.assertEqual(2.0, result['used'])
def test_get_filesystem_volume_stats_error(self):
with patch.object(os, 'statvfs', side_effect=OSError):
self.assertRaises(
RuntimeError,
get_filesystem_volume_stats, '/nonexistent/path')
class ServiceRegistryTest(testtools.TestCase):
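    """Checks that datastore_registry() merges custom managers from the
    configuration with the built-in defaults and allows overriding an
    existing entry.
    """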
def setUp(self):
super(ServiceRegistryTest, self).setUp()
def tearDown(self):
super(ServiceRegistryTest, self).tearDown()
def test_datastore_registry_with_extra_manager(self):
datastore_registry_ext_test = {
'test': 'trove.guestagent.datastore.test.manager.Manager',
}
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual(datastore_registry_ext_test.get('test', None),
test_dict.get('test'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.'
'couchbase.manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
def test_datastore_registry_with_existing_manager(self):
datastore_registry_ext_test = {
'mysql': 'trove.guestagent.datastore.mysql.'
'manager.Manager123',
}
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager123',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.couchbase.'
'manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
'manager.Manager',
test_dict.get('vertica'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
def test_datastore_registry_with_blank_dict(self):
datastore_registry_ext_test = dict()
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.couchbase.'
'manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
'manager.Manager',
test_dict.get('vertica'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
class KeepAliveConnectionTest(testtools.TestCase):
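    """Checks KeepAliveConnection.checkout(): MySQL error 2013 becomes a
    SQLAlchemy DisconnectionError while other operational errors and type
    errors propagate unchanged.
    """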
class OperationalError(Exception):
def __init__(self, value):
self.args = [value]
def __str__(self):
            return repr(self.args[0])
def setUp(self):
super(KeepAliveConnectionTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_LOG_err = dbaas.LOG
def tearDown(self):
super(KeepAliveConnectionTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.LOG = self.orig_LOG_err
def test_checkout_type_error(self):
dbapi_con = Mock()
dbapi_con.ping = Mock(side_effect=TypeError("Type Error"))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(TypeError, self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
def test_checkout_disconnection_error(self):
dbapi_con = Mock()
dbapi_con.OperationalError = self.OperationalError
dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(sqlalchemy.exc.DisconnectionError,
self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
def test_checkout_operation_error(self):
dbapi_con = Mock()
dbapi_con.OperationalError = self.OperationalError
dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(self.OperationalError, self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
class BaseDbStatusTest(testtools.TestCase):
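    """Exercises the generic status state machine: begin_install,
    begin_restart, end_install_or_restart, the is_* helpers and
    wait_for_real_status_to_change_to().
    """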
def setUp(self):
super(BaseDbStatusTest, self).setUp()
util.init_db()
self.orig_dbaas_time_sleep = time.sleep
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(BaseDbStatusTest, self).tearDown()
time.sleep = self.orig_dbaas_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def test_begin_install(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.begin_install()
self.assertEqual(rd_instance.ServiceStatuses.BUILDING,
self.baseDbStatus.status)
def test_begin_restart(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.restart_mode = False
self.baseDbStatus.begin_restart()
self.assertTrue(self.baseDbStatus.restart_mode)
def test_end_install_or_restart(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.SHUTDOWN)
self.baseDbStatus.end_install_or_restart()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
self.baseDbStatus.status)
self.assertFalse(self.baseDbStatus.restart_mode)
def test_is_installed(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(self.baseDbStatus.is_installed)
def test_is_installed_none(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = None
self.assertTrue(self.baseDbStatus.is_installed)
def test_is_installed_building(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.BUILDING
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_installed_new(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.NEW
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_installed_failed(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.FAILED
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_restarting(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.restart_mode = True
self.assertTrue(self.baseDbStatus._is_restarting)
def test_is_running(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(self.baseDbStatus.is_running)
def test_is_running_not(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.assertFalse(self.baseDbStatus.is_running)
def test_wait_for_real_status_to_change_to(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.RUNNING)
time.sleep = Mock()
self.assertTrue(self.baseDbStatus.
wait_for_real_status_to_change_to
(rd_instance.ServiceStatuses.RUNNING, 10))
def test_wait_for_real_status_to_change_to_timeout(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.RUNNING)
time.sleep = Mock()
self.assertFalse(self.baseDbStatus.
wait_for_real_status_to_change_to
(rd_instance.ServiceStatuses.SHUTDOWN, 10))
class MySqlAppStatusTest(testtools.TestCase):
def setUp(self):
super(MySqlAppStatusTest, self).setUp()
util.init_db()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_load_mysqld_options = dbaas.load_mysqld_options
self.orig_dbaas_os_path_exists = dbaas.os.path.exists
self.orig_dbaas_time_sleep = time.sleep
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(MySqlAppStatusTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.load_mysqld_options = self.orig_load_mysqld_options
dbaas.os.path.exists = self.orig_dbaas_os_path_exists
time.sleep = self.orig_dbaas_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def test_get_actual_db_status(self):
dbaas.utils.execute_with_timeout = Mock(return_value=(None, None))
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
@patch.object(utils, 'execute_with_timeout',
side_effect=ProcessExecutionError())
@patch.object(os.path, 'exists', return_value=True)
def test_get_actual_db_status_error_crashed(self, mock_exists,
mock_execute):
dbaas.load_mysqld_options = Mock(return_value={})
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)
def test_get_actual_db_status_error_shutdown(self):
mocked = Mock(side_effect=ProcessExecutionError())
dbaas.utils.execute_with_timeout = mocked
dbaas.load_mysqld_options = Mock(return_value={})
dbaas.os.path.exists = Mock(return_value=False)
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
def test_get_actual_db_status_error_blocked(self):
dbaas.utils.execute_with_timeout = MagicMock(
side_effect=[ProcessExecutionError(), ("some output", None)])
dbaas.load_mysqld_options = Mock()
dbaas.os.path.exists = Mock(return_value=True)
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.BLOCKED, status)
class TestRedisApp(testtools.TestCase):
def setUp(self):
super(TestRedisApp, self).setUp()
self.FAKE_ID = 1000
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
with patch.multiple(RedisApp, _build_admin_client=DEFAULT,
_init_overrides_dir=DEFAULT):
self.app = RedisApp(state_change_wait_time=0)
self.orig_os_path_isfile = os.path.isfile
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
utils.execute_with_timeout = Mock()
rservice.utils.execute_with_timeout = Mock()
def tearDown(self):
super(TestRedisApp, self).tearDown()
self.app = None
os.path.isfile = self.orig_os_path_isfile
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
rservice.utils.execute_with_timeout = \
self.orig_utils_execute_with_timeout
def test_install_if_needed_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
with patch.object(RedisApp, '_install_redis', return_value=None):
self.app.install_if_needed('bar')
pkg.Package.pkg_is_installed.assert_any_call('bar')
self.assertEqual(0, RedisApp._install_redis.call_count)
def test_install_if_needed_not_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
with patch.object(RedisApp, '_install_redis', return_value=None):
self.app.install_if_needed('asdf')
pkg.Package.pkg_is_installed.assert_any_call('asdf')
RedisApp._install_redis.assert_any_call('asdf')
def test_install_redis(self):
with patch.object(utils, 'execute_with_timeout'):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
self.app._install_redis('redis')
pkg.Package.pkg_install.assert_any_call('redis', {}, 1200)
RedisApp.start_redis.assert_any_call()
self.assertTrue(utils.execute_with_timeout.called)
def test_enable_redis_on_boot_without_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_enable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._enable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_enable_redis_on_boot_with_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_enable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._enable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_disable_redis_on_boot_with_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_disable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._disable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_disable_redis_on_boot_without_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_disable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._disable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_stop_db_without_fail(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.status = mock_status
RedisApp._disable_redis_on_boot = MagicMock(
return_value=None)
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.stop_db(do_not_start_on_reboot=True)
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
def test_stop_db_with_failure(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.status = mock_status
RedisApp._disable_redis_on_boot = MagicMock(
return_value=None)
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
self.app.stop_db(do_not_start_on_reboot=True)
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(mock_status.end_install_or_restart.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
def test_restart(self):
mock_status = MagicMock()
self.app.status = mock_status
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(RedisApp, 'stop_db', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
self.app.restart()
mock_status.begin_restart.assert_any_call()
RedisApp.stop_db.assert_any_call()
RedisApp.start_redis.assert_any_call()
mock_status.end_install_or_restart.assert_any_call()
def test_start_redis(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self._assert_start_redis(mock_status)
@patch.object(utils, 'execute_with_timeout')
def test_start_redis_with_failure(self, exec_mock):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
mock_status.end_install_or_restart = MagicMock()
self._assert_start_redis(mock_status)
exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
run_as_root=True, root_helper='sudo')
mock_status.end_install_or_restart.assert_called_once_with()
@patch.multiple(operating_system, start_service=DEFAULT,
enable_service_on_boot=DEFAULT)
def _assert_start_redis(self, mock_status, start_service,
enable_service_on_boot):
self.app.status = mock_status
self.app.start_redis()
mock_status.wait_for_real_status_to_change_to.assert_called_once_with(
rd_instance.ServiceStatuses.RUNNING, ANY, False)
enable_service_on_boot.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
start_service.assert_called_once_with(RedisSystem.SERVICE_CANDIDATES)
class CassandraDBAppTest(testtools.TestCase):
def setUp(self):
super(CassandraDBAppTest, self).setUp()
self.utils_execute_with_timeout = (
cass_service.utils.execute_with_timeout)
self.sleep = time.sleep
self.pkg_version = cass_service.packager.pkg_version
self.pkg = cass_service.packager
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.cassandra = cass_service.CassandraApp(self.appStatus)
self.orig_unlink = os.unlink
def tearDown(self):
super(CassandraDBAppTest, self).tearDown()
cass_service.utils.execute_with_timeout = (self.
utils_execute_with_timeout)
time.sleep = self.sleep
cass_service.packager.pkg_version = self.pkg_version
cass_service.packager = self.pkg
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.cassandra.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_with_db_update(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.cassandra.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_stop_db_error(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.cassandra.stop_db)
def test_restart(self):
self.cassandra.stop_db = Mock()
self.cassandra.start_db = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.restart()
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra_runs_forever(self):
cass_service.utils.execute_with_timeout = Mock()
(self.cassandra.status.
wait_for_real_status_to_change_to) = Mock(return_value=False)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.cassandra.stop_db)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_db_with_db_update(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.cassandra.start_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra_error(self):
self.cassandra._enable_db_on_boot = Mock()
self.cassandra.state_change_wait_time = 1
cass_service.utils.execute_with_timeout = Mock(
side_effect=ProcessExecutionError('Error'))
self.assertRaises(RuntimeError, self.cassandra.start_db)
def test_install(self):
self.cassandra._install_db = Mock()
self.pkg.pkg_is_installed = Mock(return_value=False)
self.cassandra.install_if_needed(['cassandra'])
self.assertTrue(self.cassandra._install_db.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_install_install_error(self):
self.cassandra.start_db = Mock()
self.cassandra.stop_db = Mock()
self.pkg.pkg_is_installed = Mock(return_value=False)
self.cassandra._install_db = Mock(
side_effect=pkg.PkgPackageStateError("Install error"))
self.assertRaises(pkg.PkgPackageStateError,
self.cassandra.install_if_needed,
['cassandra=1.2.10'])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_cassandra_error_in_write_config_verify_unlink(self):
# this test verifies not only that the write_config
# method properly invoked execute, but also that it properly
# attempted to unlink the file (as a result of the exception)
mock_unlink = Mock(return_value=0)
# We call tempfile.mkstemp() here and Mock() the mkstemp()
# parameter to write_config for testability.
(temp_handle, temp_config_name) = tempfile.mkstemp()
mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
configuration = 'this is my configuration'
with patch('trove.guestagent.common.operating_system.move',
side_effect=ProcessExecutionError('some exception')):
self.assertRaises(ProcessExecutionError,
self.cassandra.write_config,
config_contents=configuration,
execute_function=Mock(),
mkstemp_function=mock_mkstemp,
unlink_function=mock_unlink)
self.assertEqual(1, mock_unlink.call_count)
# really delete the temporary_config_file
os.unlink(temp_config_name)
@patch.multiple('trove.guestagent.common.operating_system',
chown=DEFAULT, chmod=DEFAULT, move=DEFAULT)
def test_cassandra_write_config(self, chown, chmod, move):
# ensure that write_config creates a temporary file, and then
# moves the file to the final place. Also validate the
# contents of the file written.
# We call tempfile.mkstemp() here and Mock() the mkstemp()
# parameter to write_config for testability.
(temp_handle, temp_config_name) = tempfile.mkstemp()
mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
configuration = 'some arbitrary configuration text'
mock_execute = MagicMock(return_value=('', ''))
self.cassandra.write_config(configuration,
execute_function=mock_execute,
mkstemp_function=mock_mkstemp)
move.assert_called_with(temp_config_name, cass_system.CASSANDRA_CONF,
as_root=True)
chown.assert_called_with(cass_system.CASSANDRA_CONF,
"cassandra", "cassandra", recursive=False,
as_root=True)
chmod.assert_called_with(
cass_system.CASSANDRA_CONF, FileMode.ADD_READ_ALL, as_root=True)
self.assertEqual(1, mock_mkstemp.call_count)
with open(temp_config_name, 'r') as config_file:
configuration_data = config_file.read()
self.assertEqual(configuration, configuration_data)
# really delete the temporary_config_file
os.unlink(temp_config_name)
class CouchbaseAppTest(testtools.TestCase):
def fake_couchbase_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(CouchbaseAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
couchservice.utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
time.sleep = Mock()
self.orig_service_discovery = operating_system.service_discovery
self.orig_get_ip = netutils.get_my_ipv4
operating_system.service_discovery = (
self.fake_couchbase_service_discovery)
netutils.get_my_ipv4 = Mock()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.couchbaseApp = couchservice.CouchbaseApp(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(CouchbaseAppTest, self).tearDown()
couchservice.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
netutils.get_my_ipv4 = self.orig_get_ip
operating_system.service_discovery = self.orig_service_discovery
time.sleep = self.orig_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.couchbaseApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_error(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.couchbaseApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp.stop_db = Mock()
self.couchbaseApp.start_db = Mock()
self.couchbaseApp.restart()
self.assertTrue(self.couchbaseApp.stop_db.called)
self.assertTrue(self.couchbaseApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp._enable_db_on_boot = Mock()
self.couchbaseApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
couchservice.utils.execute_with_timeout = mocked
self.couchbaseApp._enable_db_on_boot = Mock()
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
def test_start_db_runs_forever(self):
couchservice.utils.execute_with_timeout = Mock()
self.couchbaseApp._enable_db_on_boot = Mock()
self.couchbaseApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_install_when_couchbase_installed(self):
couchservice.packager.pkg_is_installed = Mock(return_value=True)
couchservice.utils.execute_with_timeout = Mock()
self.couchbaseApp.install_if_needed(["package"])
self.assertTrue(couchservice.packager.pkg_is_installed.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class CouchDBAppTest(testtools.TestCase):
def fake_couchdb_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(CouchDBAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
couchdb_service.utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
time.sleep = Mock()
self.orig_service_discovery = operating_system.service_discovery
self.orig_get_ip = netutils.get_my_ipv4
operating_system.service_discovery = (
self.fake_couchdb_service_discovery)
netutils.get_my_ipv4 = Mock()
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.couchdbApp = couchdb_service.CouchDBApp(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(CouchDBAppTest, self).tearDown()
couchdb_service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
netutils.get_my_ipv4 = self.orig_get_ip
operating_system.service_discovery = self.orig_service_discovery
time.sleep = self.orig_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.couchdbApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_error(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.couchdbApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp.stop_db = Mock()
self.couchdbApp.start_db = Mock()
self.couchdbApp.restart()
self.assertTrue(self.couchdbApp.stop_db.called)
self.assertTrue(self.couchdbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp._enable_db_on_boot = Mock()
self.couchdbApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_error(self):
couchdb_service.utils.execute_with_timeout = Mock(
side_effect=ProcessExecutionError('Error'))
self.couchdbApp._enable_db_on_boot = Mock()
self.assertRaises(RuntimeError, self.couchdbApp.start_db)
def test_install_when_couchdb_installed(self):
couchdb_service.packager.pkg_is_installed = Mock(return_value=True)
couchdb_service.utils.execute_with_timeout = Mock()
self.couchdbApp.install_if_needed(["package"])
self.assertTrue(couchdb_service.packager.pkg_is_installed.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class MongoDBAppTest(testtools.TestCase):
def fake_mongodb_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(MongoDBAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (mongo_service.
utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
self.orig_packager = mongo_system.PACKAGER
self.orig_service_discovery = operating_system.service_discovery
self.orig_os_unlink = os.unlink
operating_system.service_discovery = (
self.fake_mongodb_service_discovery)
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mongoDbApp = mongo_service.MongoDBApp(self.appStatus)
time.sleep = Mock()
os.unlink = Mock()
def tearDown(self):
super(MongoDBAppTest, self).tearDown()
mongo_service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
time.sleep = self.orig_time_sleep
mongo_system.PACKAGER = self.orig_packager
operating_system.service_discovery = self.orig_service_discovery
os.unlink = self.orig_os_unlink
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stopdb(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mongoDbApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_with_db_update(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mongoDbApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
def test_stop_db_error(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mongoDbApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.stop_db = Mock()
self.mongoDbApp.start_db = Mock()
self.mongoDbApp.restart()
self.assertTrue(self.mongoDbApp.stop_db.called)
self.assertTrue(self.mongoDbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'running'}))
def test_start_db(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_with_update(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.start_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'running'}))
def test_start_db_runs_forever(self):
mongo_service.utils.execute_with_timeout = Mock(
return_value=["ubuntu 17036 0.0 0.1 618960 "
"29232 pts/8 Sl+ Jan29 0:07 mongod", ""])
self.mongoDbApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
def test_start_db_error(self):
self.mongoDbApp._enable_db_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
mongo_service.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
def test_mongodb_error_in_write_config_verify_unlink(self):
configuration = {'config_contents': 'some junk'}
with patch.object(os.path, 'isfile', return_value=True):
with patch.object(operating_system, 'move',
side_effect=ProcessExecutionError):
self.assertRaises(ProcessExecutionError,
self.mongoDbApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, operating_system.move.call_count)
self.assertEqual(1, os.unlink.call_count)
def test_start_db_with_conf_changes_db_is_running(self):
self.mongoDbApp.start_db = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mongoDbApp.start_db_with_conf_changes,
Mock())
def test_install_when_db_installed(self):
packager_mock = MagicMock()
packager_mock.pkg_is_installed = MagicMock(return_value=True)
mongo_system.PACKAGER = packager_mock
self.mongoDbApp.install_if_needed(['package'])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_install_when_db_not_installed(self):
packager_mock = MagicMock()
packager_mock.pkg_is_installed = MagicMock(return_value=False)
mongo_system.PACKAGER = packager_mock
self.mongoDbApp.install_if_needed(['package'])
packager_mock.pkg_install.assert_any_call(ANY, {}, ANY)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class VerticaAppStatusTest(testtools.TestCase):
def setUp(self):
super(VerticaAppStatusTest, self).setUp()
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
def tearDown(self):
super(VerticaAppStatusTest, self).tearDown()
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def test_get_actual_db_status(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(return_value=['db_srvr', None])):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
def test_get_actual_db_status_shutdown(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', None],
['db_srvr', None]])):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
def test_get_actual_db_status_error_crashed(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=ProcessExecutionError('problem'
))):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)
class VerticaAppTest(testtools.TestCase):
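    # Covers VerticaApp install, start/stop, config read/write and SSH-key helpers against a mocked shell_execute.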
def setUp(self):
super(VerticaAppTest, self).setUp()
self.FAKE_ID = 1000
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.app = VerticaApp(self.appStatus)
self.setread = VolumeDevice.set_readahead_size
self.Popen = subprocess.Popen
vertica_system.shell_execute = MagicMock(return_value=('', ''))
VolumeDevice.set_readahead_size = Mock()
subprocess.Popen = Mock()
self.test_config = ConfigParser.ConfigParser()
self.test_config.add_section('credentials')
self.test_config.set('credentials',
'dbadmin_password', 'some_password')
def tearDown(self):
super(VerticaAppTest, self).tearDown()
self.app = None
VolumeDevice.set_readahead_size = self.setread
subprocess.Popen = self.Popen
def test_install_if_needed_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
self.app.install_if_needed('vertica')
pkg.Package.pkg_is_installed.assert_any_call('vertica')
self.assertEqual(0, pkg.Package.pkg_install.call_count)
def test_install_if_needed_not_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
self.app.install_if_needed('vertica')
pkg.Package.pkg_is_installed.assert_any_call('vertica')
self.assertEqual(1, pkg.Package.pkg_install.call_count)
def test_prepare_for_install_vertica(self):
self.app.prepare_for_install_vertica()
arguments = vertica_system.shell_execute.call_args_list[0]
self.assertEqual(1, VolumeDevice.set_readahead_size.call_count)
expected_command = (
"VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin "
"VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python"
" -m vertica.local_coerce")
arguments.assert_called_with(expected_command)
def test_failure_prepare_for_install_vertica(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaises(ProcessExecutionError,
self.app.prepare_for_install_vertica)
def test_install_vertica(self):
with patch.object(self.app, 'write_config',
return_value=None):
self.app.install_vertica(members='10.0.0.2')
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (
vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica'))
arguments.assert_called_with(expected_command)
def test_failure_install_vertica(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('some exception')):
self.assertRaisesRegexp(RuntimeError, 'install_vertica failed.',
self.app.install_vertica,
members='10.0.0.2')
def test_create_db(self):
with patch.object(self.app, 'read_config',
return_value=self.test_config):
self.app.create_db(members='10.0.0.2')
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr',
'/var/lib/vertica',
'/var/lib/vertica',
'some_password'))
arguments.assert_called_with(expected_command, 'dbadmin')
def test_failure_create_db(self):
with patch.object(self.app, 'read_config',
side_effect=RuntimeError('Error')):
self.assertRaisesRegexp(RuntimeError,
'Vertica database create failed.',
self.app.create_db)
# Because of an exception in read_config there was no shell execution.
self.assertEqual(0, vertica_system.shell_execute.call_count)
def test_vertica_write_config(self):
temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
mock_mkstemp = MagicMock(return_value=(temp_file_handle))
mock_unlink = Mock(return_value=0)
self.app.write_config(config=self.test_config,
temp_function=mock_mkstemp,
unlink_function=mock_unlink)
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (
("install -o root -g root -m 644 %(source)s %(target)s"
) % {'source': temp_file_handle.name,
'target': vertica_system.VERTICA_CONF})
arguments.assert_called_with(expected_command)
self.assertEqual(1, mock_mkstemp.call_count)
configuration_data = ConfigParser.ConfigParser()
configuration_data.read(temp_file_handle.name)
self.assertEqual(
self.test_config.get('credentials', 'dbadmin_password'),
configuration_data.get('credentials', 'dbadmin_password'))
self.assertEqual(1, mock_unlink.call_count)
# delete the temporary_config_file
os.unlink(temp_file_handle.name)
def test_vertica_error_in_write_config_verify_unlink(self):
mock_unlink = Mock(return_value=0)
temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
mock_mkstemp = MagicMock(return_value=temp_file_handle)
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('some exception')):
self.assertRaises(ProcessExecutionError,
self.app.write_config,
config=self.test_config,
temp_function=mock_mkstemp,
unlink_function=mock_unlink)
self.assertEqual(1, mock_unlink.call_count)
# delete the temporary_config_file
os.unlink(temp_file_handle.name)
def test_restart(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(VerticaApp, 'stop_db', return_value=None):
with patch.object(VerticaApp, 'start_db', return_value=None):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.restart()
mock_status.begin_restart.assert_any_call()
VerticaApp.stop_db.assert_any_call()
VerticaApp.start_db.assert_any_call()
def test_start_db(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_enable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.start_db()
agent_start, db_start = subprocess.Popen.call_args_list
agent_expected_command = [
'sudo', 'su', '-', 'root', '-c',
(vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')]
db_expected_cmd = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.START_DB % ('db_srvr', 'some_password'))]
self.assertTrue(mock_status.end_install_or_restart.called)
agent_start.assert_called_with(agent_expected_command)
db_start.assert_called_with(db_expected_cmd)
def test_start_db_failure(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
with patch.object(app, '_enable_db_on_boot',
side_effect=RuntimeError()):
with patch.object(app, 'read_config',
return_value=self.test_config):
self.assertRaises(RuntimeError, app.start_db)
def test_stop_db(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.stop_db()
self.assertEqual(
3, vertica_system.shell_execute.call_count)
# There are 3 shell-executions:
# a) stop vertica-agent service
            # b) check database status
# c) stop_db
            # We are matching that the 3rd command called was stop_db
arguments = vertica_system.shell_execute.call_args_list[2]
expected_cmd = (vertica_system.STOP_DB % ('db_srvr',
'some_password'))
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
arguments.assert_called_with(expected_cmd, 'dbadmin')
def test_stop_db_do_not_start_on_reboot(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=True)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
app.stop_db(do_not_start_on_reboot=True)
self.assertEqual(
3, vertica_system.shell_execute.call_count)
app._disable_db_on_boot.assert_any_call()
def test_stop_db_database_not_running(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
app.stop_db()
            # Since the database stop command does not get executed,
            # only 2 shell calls are made.
self.assertEqual(
2, vertica_system.shell_execute.call_count)
def test_stop_db_failure(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=None)
mock_status.end_install_or_restart = MagicMock(
return_value=None)
self.assertRaises(RuntimeError, app.stop_db)
def test_export_conf_to_members(self):
self.app._export_conf_to_members(members=['member1', 'member2'])
self.assertEqual(2, vertica_system.shell_execute.call_count)
def test_fail__export_conf_to_members(self):
app = VerticaApp(MagicMock())
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaises(ProcessExecutionError,
app._export_conf_to_members,
['member1', 'member2'])
def test_authorize_public_keys(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
self.app.authorize_public_keys(user=user, public_keys=keys)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
def test_authorize_public_keys_authorized_file_not_exists(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
['', '']])):
self.app.authorize_public_keys(user=user, public_keys=keys)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
def test_fail_authorize_public_keys(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
ProcessExecutionError('Some Error')
])):
self.assertRaises(ProcessExecutionError,
self.app.authorize_public_keys, user, keys)
def test_get_public_keys(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
self.app.get_public_keys(user=user)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
(vertica_system.SSH_KEY_GEN % ('/home/' + user)), user)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/id_rsa.pub')
def test_get_public_keys_if_key_exists(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
['some_key', None]])):
key = self.app.get_public_keys(user=user)
self.assertEqual(2, vertica_system.shell_execute.call_count)
self.assertEqual('some_key', key)
def test_fail_get_public_keys(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
ProcessExecutionError('Some Error')
])):
self.assertRaises(ProcessExecutionError,
self.app.get_public_keys, user)
def test_install_cluster(self):
with patch.object(self.app, 'read_config',
return_value=self.test_config):
self.app.install_cluster(members=['member1', 'member2'])
        # Verifying number of shell calls,
        # as the command has already been tested in preceding tests
self.assertEqual(5, vertica_system.shell_execute.call_count)
def test__enable_db_on_boot(self):
app = VerticaApp(MagicMock())
app._enable_db_on_boot()
restart_policy, agent_enable = subprocess.Popen.call_args_list
expected_restart_policy = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.SET_RESTART_POLICY % ('db_srvr', 'always'))]
expected_agent_enable = [
'sudo', 'su', '-', 'root', '-c',
(vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'enable')]
self.assertEqual(2, subprocess.Popen.call_count)
restart_policy.assert_called_with(expected_restart_policy)
agent_enable.assert_called_with(expected_agent_enable)
def test_failure__enable_db_on_boot(self):
with patch.object(subprocess, 'Popen', side_effect=OSError):
self.assertRaisesRegexp(RuntimeError,
'Could not enable db on boot.',
self.app._enable_db_on_boot)
def test__disable_db_on_boot(self):
app = VerticaApp(MagicMock())
app._disable_db_on_boot()
restart_policy, agent_disable = (
vertica_system.shell_execute.call_args_list)
expected_restart_policy = (
vertica_system.SET_RESTART_POLICY % ('db_srvr', 'never'))
expected_agent_disable = (
vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'disable')
self.assertEqual(2, vertica_system.shell_execute.call_count)
restart_policy.assert_called_with(expected_restart_policy, 'dbadmin')
agent_disable.assert_called_with(expected_agent_disable, 'root')
def test_failure__disable_db_on_boot(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaisesRegexp(RuntimeError,
'Could not disable db on boot.',
self.app._disable_db_on_boot)
def test_read_config(self):
app = VerticaApp(MagicMock())
with patch.object(ConfigParser, 'ConfigParser',
return_value=self.test_config):
test_config = app.read_config()
self.assertEqual('some_password',
test_config.get('credentials', 'dbadmin_password')
)
def test_fail_read_config(self):
with patch.object(ConfigParser.ConfigParser, 'read',
side_effect=ConfigParser.Error()):
self.assertRaises(RuntimeError, self.app.read_config)
def test_complete_install_or_restart(self):
app = VerticaApp(MagicMock())
app.complete_install_or_restart()
app.status.end_install_or_restart.assert_any_call()
def test_start_db_with_conf_changes(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, 'read_config',
return_value=self.test_config):
app.start_db_with_conf_changes('test_config_contents')
app.status.end_install_or_restart.assert_any_call()
class DB2AppTest(testtools.TestCase):
def setUp(self):
super(DB2AppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
db2service.utils.execute_with_timeout)
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.db2App = db2service.DB2App(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(DB2AppTest, self).tearDown()
db2service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
self.db2App = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
db2service.utils.execute_with_timeout = MagicMock(return_value=None)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.db2App.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_restart_server(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
mock_status = MagicMock(return_value=None)
app = db2service.DB2App(mock_status)
mock_status.begin_restart = MagicMock(return_value=None)
app.stop_db = MagicMock(return_value=None)
app.start_db = MagicMock(return_value=None)
app.restart()
self.assertTrue(mock_status.begin_restart.called)
self.assertTrue(app.stop_db.called)
self.assertTrue(app.start_db.called)
def test_start_db(self):
db2service.utils.execute_with_timeout = MagicMock(return_value=None)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
with patch.object(self.db2App, '_enable_db_on_boot',
return_value=None):
self.db2App.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class DB2AdminTest(testtools.TestCase):
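    # Verifies the db2 command strings DB2Admin builds for database and user management.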
def setUp(self):
super(DB2AdminTest, self).setUp()
self.db2Admin = db2service.DB2Admin()
self.orig_utils_execute_with_timeout = (
db2service.utils.execute_with_timeout)
def tearDown(self):
super(DB2AdminTest, self).tearDown()
db2service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
def test_delete_database(self):
with patch.object(
db2service, 'run_command',
MagicMock(
return_value=None,
side_effect=ProcessExecutionError('Error'))):
self.assertRaises(GuestError,
self.db2Admin.delete_database,
FAKE_DB)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 drop database testDB"
self.assertEqual(expected, args[0],
"Delete database queries are not the same")
def test_list_databases(self):
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
self.db2Admin.list_databases()
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 list database directory " \
"| grep -B6 -i indirect | grep 'Database name' | " \
"sed 's/.*= //'"
self.assertEqual(expected, args[0],
"Delete database queries are not the same")
def test_create_users(self):
with patch.object(db2service, 'run_command', MagicMock(
return_value=None)):
db2service.utils.execute_with_timeout = MagicMock(
return_value=None)
self.db2Admin.create_user(FAKE_USER)
self.assertTrue(db2service.utils.execute_with_timeout.called)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
"ON DATABASE TO USER random; db2 connect reset"
self.assertEqual(
expected, args[0],
"Granting database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_delete_users_with_db(self):
with patch.object(db2service, 'run_command',
MagicMock(return_value=None)):
with patch.object(db2service.DB2Admin, 'list_access',
MagicMock(return_value=None)):
utils.execute_with_timeout = MagicMock(return_value=None)
self.db2Admin.delete_user(FAKE_USER[0])
self.assertTrue(db2service.run_command.called)
self.assertTrue(db2service.utils.execute_with_timeout.called)
self.assertFalse(db2service.DB2Admin.list_access.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
"ON DATABASE FROM USER random; db2 connect reset"
self.assertEqual(
expected, args[0],
"Revoke database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_delete_users_without_db(self):
FAKE_USER.append(
{"_name": "random2", "_password": "guesswhat", "_databases": []})
with patch.object(db2service, 'run_command',
MagicMock(return_value=None)):
with patch.object(db2service.DB2Admin, 'list_access',
MagicMock(return_value=[FAKE_DB])):
utils.execute_with_timeout = MagicMock(return_value=None)
self.db2Admin.delete_user(FAKE_USER[1])
self.assertTrue(db2service.run_command.called)
self.assertTrue(db2service.DB2Admin.list_access.called)
self.assertTrue(
db2service.utils.execute_with_timeout.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
"DATAACCESS ON DATABASE FROM USER random2; " \
"db2 connect reset"
self.assertEqual(
expected, args[0],
"Revoke database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_list_users(self):
databases = []
databases.append(FAKE_DB)
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
with patch.object(self.db2Admin, "list_databases",
MagicMock(return_value=(databases, None))):
self.db2Admin.list_users()
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 +o connect to testDB; " \
"db2 -x select grantee, dataaccessauth " \
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(expected, args[0],
"List database queries are not the same")
def test_get_user(self):
databases = []
databases.append(FAKE_DB)
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
with patch.object(self.db2Admin, "list_databases",
MagicMock(return_value=(databases, None))):
self.db2Admin._get_user('random', None)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 +o connect to testDB; " \
"db2 -x select grantee, dataaccessauth " \
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(args[0], expected,
"Delete database queries are not the same")
| apache-2.0 | 275,580,507,818,071,580 | 42.310604 | 79 | 0.602201 | false |
omegachysis/arche-engine | arche/image.py | 1 | 11087 |
_panda = False
try:
import pygame
from pygame import locals
except:
_panda = True
import logging
log = logging.getLogger("R.Surface")
def scaleImage(surface, width, height):
""" Return surface scaled to fit width and height. """
#log.debug("scaled image %s" % repr(surface))
return pygame.transform.smoothscale(surface, (width, height))
def profilerRecordImageSurfaces():
log.info("PERFORMANCE PROFILER ENGAGED: RecordImageSurfaces")
ImageSurface.debugRecordSurfaces = True
def profilerRevealPixelAlpha():
log.info("PERFORMANCE PROFILER ENGAGED: RevealPixelAlpha")
ImageSurface.debugRevealPixelAlpha = True
for surf in ImageSurface.imageSurfaces:
surf.refresh()
if not ImageSurface.debugRecordSurfaces:
log.warning("PERFORMANCE PROFILER FAILED: Not recording surfaces; "+\
"inconsistancies may occur.")
def createDefaultSurface():
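    """ Return a 1x1 solid white surface used as a default image. """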
surface = pygame.Surface((1,1))
surface.fill((255,255,255,255))
return surface
newDefaultSurface = createDefaultSurface
def newRectangle(width, height, color = (255,255,255)):
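    """ Return a new (width, height) surface filled with the given color. """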
surface = pygame.Surface((width, height))
surface.fill(color)
return surface
class _ImageRect(object):
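    # Minimal rectangle record (x, y, width, height); stands in for pygame.Rect on the Panda3D path.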
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
class ImageSurfacePanda(object):
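    # Panda3D-backed image surface; substituted for ImageSurface at the bottom of this module when pygame is unavailable.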
def __init__(self, surface, pixelAlpha=True):
if isinstance(surface, str):
self.surface = loader.loadTexture(surface)
def getSurface(self):
return self._surface
def setSurface(self, value):
self._surface = value
self._rect = _ImageRect(0, 0, self.width, self.height)
surface = property(getSurface, setSurface)
def getWidth(self):
return self._surface.getSimpleXSize()
def getHeight(self):
return self._surface.getSimpleYSize()
width = property(getWidth)
height = property(getHeight)
def rect(self):
try:
return self._rect
except:
return None
def refresh(self):
pass
class ImageCanvas(object):
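    # Thin wrapper around an already-composited pygame surface exposing the same get()/rect()/refresh() interface as ImageSurface.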
def __init__(self, pygameSurface):
self.composite = pygameSurface.convert()
self.clip = None
def convert(self):
return self.composite.convert()
def convertAlpha(self):
return self.composite.convert_alpha()
def refresh(self):
pass
def rect(self):
return self.composite.get_rect()
def get(self):
return self.composite
class ImageSurface(object):
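    # Wraps a source pygame surface and caches a composite with scale, color tint, alpha and clip applied.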
imageSurfaces = []
debugRecordSurfaces = False
debugRevealPixelAlpha = False
if debugRevealPixelAlpha:
log.debug("PERFORMANCE PROFILER ENGAGED: RevealPixelAlpha")
def __init__(self, surface, pixelAlpha=True):
if ImageSurface.debugRecordSurfaces:
ImageSurface.imageSurfaces.append(self)
if isinstance(surface, str):
surface = pygame.image.load(surface)
elif isinstance(surface, ImageSurface):
surface = surface.source
if surface:
if not pixelAlpha:
self._surface = surface.convert()
else:
self._surface = surface.convert_alpha()
else:
self._surface = None
self.composite = None
self._modScale = None
self._modColor = None
self._pixelAlpha = pixelAlpha
if self._surface:
self._width = self._surface.get_width()
self._height = self._surface.get_height()
else:
self._width = 0
self._height = 0
self._red = 255
self._green = 255
self._blue = 255
self._alpha = 255
if self._surface:
self.refresh()
_clip = None
_clipX = 0
_clipY = 0
def convert(self):
return self.composite.convert()
def convertAlpha(self):
return self.composite.convert_alpha()
def getPixel(self, x, y):
return self.get().get_at((x,y))
def copy(self):
return ImageSurface(self, self._pixelAlpha)
def resetClip(self):
self.setClip((0,0,self.getWidth(),self.getHeight()))
def removeClip(self):
self.setClip(None)
def getClip(self):
return self._clip
def setClip(self, value):
if value:
self._clipX = value[0]
self._clipY = value[1]
self.applyClip()
self._clip = value
clip = property(getClip, setClip)
def getClipX(self):
return self._clipX
def setClipX(self, value):
if not self._clip:
self.resetClip()
self._clipX = value
clip = self.getClip()
self.setClip((value, clip[1], clip[2], clip[3]))
clipX = property(getClipX, setClipX)
def getClipY(self):
return self._clipY
def setClipY(self, value):
if not self._clip:
self.resetClip()
self._clipY = value
clip = self.getClip()
self.setClip((clip[0], value, clip[2], clip[3]))
clipY = property(getClipY, setClipY)
def setAllowPixelAlpha(self, allowPixelAlpha):
if allowPixelAlpha != self._pixelAlpha:
if allowPixelAlpha:
self._surface = self._surface.convert_alpha()
else:
self._surface = self._surface.convert()
self._pixelAlpha = allowPixelAlpha
def getAllowPixelAlpha(self):
return self._pixelAlpha
allowPixelAlpha = property(getAllowPixelAlpha, setAllowPixelAlpha)
def _revealPixelAlpha(self):
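        # Debug helper: returns a solid red surface for per-pixel-alpha images and a green one otherwise, making the alpha mode visible on screen.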
if self._pixelAlpha:
surface = pygame.Surface((self._width, self._height)).convert_alpha()
surface.fill((255,0,0,255))
return surface
else:
surface = pygame.Surface((self._width, self._height)).convert()
surface.fill((0,255,0,255))
return surface
def refresh(self):
""" Apply all modified image parameters. """
if self.source:
self.applyScale()
def replace(self, surface, normalize=True):
""" Replace source surface with another. """
if ImageSurface.debugRevealPixelAlpha:
surface = self._revealPixelAlpha()
if not self._pixelAlpha:
self._surface = surface.convert()
else:
self._surface = surface.convert_alpha()
self.refresh()
if normalize:
self.normalize()
def permeate(self):
""" Set the source image surface to the current composite surface. """
self.source = self.composite
def normalize(self):
""" Reset scaling parameters to fit source surface. """
self.size = self._surface.get_size()
def get(self):
""" Get the finished composite surface. """
return self.composite
def rect(self):
""" Get rectangle of compsite surface. """
if self.composite:
return self.composite.get_rect()
else:
return pygame.Rect((0,0,1,1))
def convert(self):
""" Return a converted version of the source surface. """
if not self._pixelAlpha:
return self._surface.convert()
else:
return self._surface.convert_alpha()
def applyScale(self):
# This is a slow pass. Do this as little as possible.
self._modScale = scaleImage(self._surface, int(self._width), int(self._height))
if ImageSurface.debugRevealPixelAlpha:
if self._pixelAlpha:
self._modScale.fill((255,0,0,255))
else:
self._modScale.fill((0,255,0,255))
self.applyColor()
self.applyAlpha()
self.applyClip()
def applyColor(self):
# This is a semi fast pass. Use the scaling slow passed image.
if not ImageSurface.debugRevealPixelAlpha:
if not self._pixelAlpha:
self._modColor = self._modScale.convert()
self._modColor.fill((self._red, self._green, self._blue),
None, locals.BLEND_RGB_MULT)
self.applyAlpha()
else:
self._modColor = self._modScale.convert_alpha()
self._modColor.fill((self._red, self._green, self._blue, self._alpha),
None, locals.BLEND_RGBA_MULT)
self.composite = self._modColor
else:
self.composite = self._modScale
def applyAlpha(self):
# This is a fast pass. Use the double passed image from scale and color.
if not ImageSurface.debugRevealPixelAlpha:
if not self._pixelAlpha:
self._modColor.set_alpha(self._alpha)
self.composite = self._modColor
else:
self.applyColor()
else:
self.composite = self._modScale
def applyClip(self):
# This is a very fast pass. Use the triple passed image from scale, color, and alpha
image = self._modColor
image.set_clip(self._clip)
self.composite = image
def getSource(self):
return self._surface
def setSource(self, source):
self.replace(source, True)
source = property(getSource, setSource)
image = property(getSource, setSource)
def getWidth(self):
return self._width
def setWidth(self, width):
self._width = width
self.applyScale()
width = property(getWidth, setWidth)
def getHeight(self):
return self._height
def setHeight(self, height):
self._height = height
self.applyScale()
height = property(getHeight, setHeight)
def getSize(self):
return (self._width, self._height)
def setSize(self, size):
self._width = size[0]
self._height = size[1]
self.applyScale()
size = property(getSize, setSize)
def setScale(self, scalar):
self.setSize((self.getWidth() * scalar, self.getHeight() * scalar))
def getRed(self):
return self._red
def setRed(self, red):
self._red = red
self.applyColor()
red = property(getRed, setRed)
def getGreen(self):
return self._green
def setGreen(self, green):
self._green = green
self.applyColor()
green = property(getGreen, setGreen)
def getBlue(self):
return self._blue
def setBlue(self, blue):
self._blue = blue
self.applyColor()
blue = property(getBlue, setBlue)
def getAlpha(self):
return self._alpha
def setAlpha(self, alpha):
self._alpha = alpha
self.applyAlpha()
alpha = property(getAlpha, setAlpha)
def getColor(self):
return (self._red, self._green, self._blue)
def setColor(self, color):
self._red = color[0]
self._green = color[1]
self._blue = color[2]
self.applyColor()
color = property(getColor, setColor)
if _panda:
ImageSurface = ImageSurfacePanda | apache-2.0 | 5,942,455,946,340,129,000 | 29.378082 | 93 | 0.590872 | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/battle/DistributedBattleFinal.py | 1 | 7784 | from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from BattleBase import *
from direct.actor import Actor
from toontown.distributed import DelayDelete
from direct.directnotify import DirectNotifyGlobal
import DistributedBattleBase
import MovieUtil
from toontown.suit import Suit
import SuitBattleGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.toonbase import ToontownGlobals
from direct.fsm import State
import random
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
class DistributedBattleFinal(DistributedBattleBase.DistributedBattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleFinal')
def __init__(self, cr):
townBattle = cr.playGame.hood.loader.townBattle
DistributedBattleBase.DistributedBattleBase.__init__(self, cr, townBattle)
self.setupCollisions(self.uniqueBattleName('battle-collide'))
self.bossCog = None
self.bossCogRequest = None
self.streetBattle = 0
self.joiningSuitsName = self.uniqueBattleName('joiningSuits')
self.fsm.addState(State.State('ReservesJoining', self.enterReservesJoining, self.exitReservesJoining, ['WaitForJoin']))
offState = self.fsm.getStateNamed('Off')
offState.addTransition('ReservesJoining')
waitForJoinState = self.fsm.getStateNamed('WaitForJoin')
waitForJoinState.addTransition('ReservesJoining')
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('ReservesJoining')
return
def generate(self):
DistributedBattleBase.DistributedBattleBase.generate(self)
def disable(self):
DistributedBattleBase.DistributedBattleBase.disable(self)
base.cr.relatedObjectMgr.abortRequest(self.bossCogRequest)
self.bossCogRequest = None
self.bossCog = None
return
def delete(self):
DistributedBattleBase.DistributedBattleBase.delete(self)
self.removeCollisionData()
def setBossCogId(self, bossCogId):
self.bossCogId = bossCogId
if base.cr.doId2do.has_key(bossCogId):
tempBossCog = base.cr.doId2do[bossCogId]
self.__gotBossCog([tempBossCog])
else:
self.notify.debug('doing relatedObjectMgr.request for bossCog')
self.bossCogRequest = base.cr.relatedObjectMgr.requestObjects([bossCogId], allCallback=self.__gotBossCog)
def __gotBossCog(self, bossCogList):
self.bossCogRequest = None
self.bossCog = bossCogList[0]
currStateName = self.localToonFsm.getCurrentState().getName()
if currStateName == 'NoLocalToon' and self.bossCog.hasLocalToon():
self.enableCollision()
return
def setBattleNumber(self, battleNumber):
self.battleNumber = battleNumber
def setBattleSide(self, battleSide):
self.battleSide = battleSide
def setMembers(self, suits, suitsJoining, suitsPending, suitsActive, suitsLured, suitTraps, toons, toonsJoining, toonsPending, toonsActive, toonsRunning, timestamp):
if self.battleCleanedUp():
return
oldtoons = DistributedBattleBase.DistributedBattleBase.setMembers(self, suits, suitsJoining, suitsPending, suitsActive, suitsLured, suitTraps, toons, toonsJoining, toonsPending, toonsActive, toonsRunning, timestamp)
if len(self.toons) == 4 and len(oldtoons) < 4:
self.notify.debug('setMembers() - battle is now full of toons')
self.closeBattleCollision()
elif len(self.toons) < 4 and len(oldtoons) == 4:
self.openBattleCollision()
def makeSuitJoin(self, suit, ts):
self.notify.debug('makeSuitJoin(%d)' % suit.doId)
self.joiningSuits.append(suit)
if self.hasLocalToon():
self.d_joinDone(base.localAvatar.doId, suit.doId)
def showSuitsJoining(self, suits, ts, name, callback):
if self.bossCog == None:
return
if self.battleSide:
openDoor = Func(self.bossCog.doorB.request, 'open')
closeDoor = Func(self.bossCog.doorB.request, 'close')
else:
openDoor = Func(self.bossCog.doorA.request, 'open')
closeDoor = Func(self.bossCog.doorA.request, 'close')
suitTrack = Parallel()
delay = 0
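        # Stagger the entrances: each suit starts moving into position one second
        # after the previous one and scales up to full size over 1.5 seconds.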
for suit in suits:
suit.setState('Battle')
if suit.dna.dept == 'l':
suit.reparentTo(self.bossCog)
suit.setPos(0, 0, 0)
suit.setPos(self.bossCog, 0, 0, 0)
suit.headsUp(self)
suit.setScale(3.8 / suit.height)
if suit in self.joiningSuits:
i = len(self.pendingSuits) + self.joiningSuits.index(suit)
destPos, h = self.suitPendingPoints[i]
destHpr = VBase3(h, 0, 0)
else:
destPos, destHpr = self.getActorPosHpr(suit, self.suits)
suitTrack.append(Track((delay, self.createAdjustInterval(suit, destPos, destHpr)), (delay + 1.5, suit.scaleInterval(1.5, 1))))
delay += 1
if self.hasLocalToon():
camera.reparentTo(self)
if random.choice([0, 1]):
camera.setPosHpr(20, -4, 7, 60, 0, 0)
else:
camera.setPosHpr(-20, -4, 7, -60, 0, 0)
done = Func(callback)
track = Sequence(openDoor, suitTrack, closeDoor, done, name=name)
track.start(ts)
self.storeInterval(track, name)
return
def __playReward(self, ts, callback):
toonTracks = Parallel()
for toon in self.toons:
toonTracks.append(Sequence(Func(toon.loop, 'victory'), Wait(FLOOR_REWARD_TIMEOUT), Func(toon.loop, 'neutral')))
name = self.uniqueName('floorReward')
track = Sequence(toonTracks, name=name)
if self.hasLocalToon():
camera.setPos(0, 0, 1)
camera.setHpr(180, 10, 0)
track += [self.bossCog.makeEndOfBattleMovie(self.hasLocalToon()), Func(callback)]
self.storeInterval(track, name)
track.start(ts)
def enterReward(self, ts):
self.notify.debug('enterReward()')
self.disableCollision()
self.delayDeleteMembers()
self.__playReward(ts, self.__handleFloorRewardDone)
return None
def __handleFloorRewardDone(self):
return None
def exitReward(self):
self.notify.debug('exitReward()')
self.clearInterval(self.uniqueName('floorReward'), finish=1)
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
for toon in self.toons:
toon.startSmooth()
return None
def enterResume(self, ts = 0):
if self.hasLocalToon():
self.removeLocalToon()
self.fsm.requestFinalState()
def exitResume(self):
return None
def enterReservesJoining(self, ts = 0):
self.delayDeleteMembers()
self.showSuitsJoining(self.joiningSuits, ts, self.joiningSuitsName, self.__reservesJoiningDone)
def __reservesJoiningDone(self):
self._removeMembersKeep()
self.doneBarrier()
def exitReservesJoining(self):
self.clearInterval(self.joiningSuitsName)
def enterNoLocalToon(self):
self.notify.debug('enterNoLocalToon()')
if self.bossCog != None and self.bossCog.hasLocalToon():
self.enableCollision()
else:
self.disableCollision()
return
def exitNoLocalToon(self):
self.disableCollision()
return None
def enterWaitForServer(self):
self.notify.debug('enterWaitForServer()')
return None
def exitWaitForServer(self):
return None
| apache-2.0 | -5,185,984,711,244,480,000 | 37.534653 | 223 | 0.656475 | false |
mkieszek/jobsplus | jobsplus_recruitment/wizard/jp_recruiter2deal.py | 1 | 4127 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 09:48:26 2013
@author: mbereda
"""
from openerp.osv import osv,fields
from openerp.tools.translate import _
import pdb
import datetime
class jp_recruiter2deal(osv.Model):
_name = 'jp.recruiter2deal'
_columns = {
'deal_id': fields.many2one('jp.deal','Deal', readonly=True),
'recruiter_id': fields.many2one('res.users', 'Recruiter', required=True),
'date_middle': fields.date('Middle date', track_visibility='onchange'),
'handover_date': fields.date('Handover date', track_visibility='onchange'),
}
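    # Wizard launched from a jp.deal form: default_get pre-fills these fields from
    # the deal found in context['active_id'], and assign_recruiter writes them back
    # to the deal and notifies the recruiter by e-mail.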
def default_get(self, cr, uid, deal_id, context=None):
"""
This function gets default values
"""
#pdb.set_trace()
res = super(jp_recruiter2deal, self).default_get(cr, uid, deal_id, context=context)
deal_id = context and context.get('active_id', False) or False
res.update({'deal_id': deal_id or False})
deal = self.pool.get('jp.deal').browse(cr,uid,[deal_id], context=context)[0]
recruiter_id = deal.recruiter_id and deal.recruiter_id.id or False
res.update({'recruiter_id': recruiter_id})
date_middle = deal.date_middle
res.update({'date_middle': date_middle})
res.update({'handover_date': deal.handover_date})
return res
def assign_recruiter(self, cr, uid, deal_id, context=None):
#pdb.set_trace()
w = self.browse(cr, uid, deal_id, context=context)[0]
deal_id = context and context.get('active_id', False) or False
values = {
'recruiter_id': w.recruiter_id and w.recruiter_id.id or False,
'date_middle': w.date_middle,
'handover_date': w.handover_date,
}
recruiter_id = w.recruiter_id.id
recruiter = self.pool.get('res.users').browse(cr, uid, [recruiter_id])[0]
deal_obj = self.pool.get('jp.deal')
deal_obj.write(cr, uid, [deal_id], values, context=context)
mail_to = recruiter.email
if mail_to is not "":
deal = deal_obj.browse(cr, uid, deal_id)
users_obj = self.pool.get('res.users')
jp_config_obj = self.pool.get('jp.config.settings')
jp_config_id = jp_config_obj.search(cr, uid, [])[-1]
jp_crm = jp_config_obj.browse(cr, uid, jp_config_id).jobsplus_crm
url = ("http://%s/?db=%s#id=%s&view_type=form&model=jp.deal")%(jp_crm, cr.dbname, deal.id)
subject = "Recruiter %s assigned to deal %s"
body = "Recruiter has been assigned to deal.<br/>Recruiter: %s<br/>Deal: %s<br/>Middle date: %s<br/>Finish date: %s<br/><a href='%s'>Link to Deal</a>"
uid = users_obj.search(cr, uid, [('id','=',1)])[0]
uid_id = users_obj.browse(cr, uid, uid)
translation_obj = self.pool.get('ir.translation')
if w.recruiter_id.partner_id.lang == 'pl_PL':
transl = translation_obj.search(cr, uid, [('src','=',body)])
transl_sub = translation_obj.search(cr, uid, [('src','=',subject)])
if transl:
trans = translation_obj.browse(cr, uid, transl)[0]
body = trans.value
if transl_sub:
trans_sub = translation_obj.browse(cr, uid, transl_sub)[0]
subject = trans_sub.value
email_from = uid_id.partner_id.name+"<"+uid_id.partner_id.email+">"
vals = {'email_from': email_from,
'email_to': mail_to,
'state': 'outgoing',
'subject': subject % (w.recruiter_id.name, deal.title),
'body_html': body % (deal.recruiter_id.name, deal.name, deal.date_middle, deal.handover_date, url),
'auto_delete': True}
self.pool.get('mail.mail').create(cr, uid, vals, context=context) | agpl-3.0 | -6,351,029,749,241,071,000 | 42.452632 | 162 | 0.53889 | false |
atmtools/typhon | doc/example_google.py | 1 | 8646 | # -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
Style Guide`_. Docstrings may extend over multiple lines. Sections are created
with a section header and a colon followed by a block of indented text.
Example:
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_google.py
Section breaks are created by resuming unindented text. Section breaks
are also implicitly created anytime a new section starts.
Attributes:
module_level_variable1 (int): Module level variables may be documented in
either the ``Attributes`` section of the module docstring, or in an
inline docstring immediately following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
module_level_variable1 = 12345
module_level_variable2 = 98765
"""int: Module level variable documented inline.
The docstring may span multiple lines. The type may optionally be specified
on the first line, separated by a colon.
"""
def module_level_function(param1, param2=None, *args, **kwargs):
"""This is an example of a module level function.
Function parameters should be documented in the ``Args`` section. The name
of each parameter is required. The type and description of each parameter
is optional, but should be included if not obvious.
Parameter types -- if given -- should be specified according to
`PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.
If \*args or \*\*kwargs are accepted,
they should be listed as ``*args`` and ``**kwargs``.
The format for a parameter is::
name (type): description
The description may span multiple lines. Following
lines should be indented. The "(type)" is optional.
Multiple paragraphs are supported in parameter
descriptions.
Args:
param1 (int): The first parameter.
param2 (Optional[str]): The second parameter. Defaults to None.
Second line of description should be indented.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
bool: True if successful, False otherwise.
The return type is optional and may be specified at the beginning of
the ``Returns`` section followed by a colon.
The ``Returns`` section may span multiple lines and paragraphs.
Following lines should be indented to match the first line.
The ``Returns`` section supports any reStructuredText formatting,
including literal blocks::
{
'param1': param1,
'param2': param2
}
Raises:
AttributeError: The ``Raises`` section is a list of all exceptions
that are relevant to the interface.
ValueError: If `param2` is equal to `param1`.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
if param1 == param2:
raise ValueError('param1 may not be equal to param2')
return True
def example_generator(n):
"""Generators have a ``Yields`` section instead of a ``Returns`` section.
Args:
n (int): The upper limit of the range to generate, from 0 to `n` - 1.
Yields:
int: The next number in the range of 0 to `n` - 1.
Examples:
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> print([i for i in example_generator(4)])
[0, 1, 2, 3]
"""
for i in range(n):
yield i
class ExampleError(Exception):
"""Exceptions are documented in the same way as classes.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
msg (str): Human readable string describing the exception.
code (Optional[int]): Error code.
Attributes:
msg (str): Human readable string describing the exception.
code (int): Exception error code.
"""
def __init__(self, msg, code):
self.msg = msg
self.code = code
class ExampleClass(object):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attribute and property types -- if given -- should be specified according
to `PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.
Attributes:
attr1 (str): Description of `attr1`.
attr2 (Optional[int]): Description of `attr2`.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
def __init__(self, param1, param2, param3):
"""Example of docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1 (str): Description of `param1`.
param2 (Optional[int]): Description of `param2`. Multiple
lines are supported.
param3 (List[str]): Description of `param3`.
"""
self.attr1 = param1
self.attr2 = param2
self.attr3 = param3 #: Doc comment *inline* with attribute
#: List[str]: Doc comment *before* attribute, with type specified
self.attr4 = ['attr4']
self.attr5 = None
"""Optional[str]: Docstring *after* attribute, with type specified."""
@property
def readonly_property(self):
"""str: Properties should be documented in their getter method."""
return 'readonly_property'
@property
def readwrite_property(self):
"""List[str]: Properties with both a getter and setter should only
be documented in their getter method.
If the setter method contains notable behavior, it should be
mentioned here.
"""
return ['readwrite_property']
@readwrite_property.setter
def readwrite_property(self, value):
value
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output.
This behavior can be disabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = False
"""
pass
def __special_without_docstring__(self):
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
| mit | -1,395,894,020,954,367,700 | 30.67033 | 79 | 0.64978 | false |
rjenc29/numerical | course/matplotlib/examples/fill_example.py | 1 | 2229 | """
Illustrate different ways of using the various fill functions.
"""
import numpy as np
import matplotlib.pyplot as plt
import example_utils
def main():
fig, axes = example_utils.setup_axes()
fill_example(axes[0])
fill_between_example(axes[1])
stackplot_example(axes[2])
example_utils.title(fig, 'fill/fill_between/stackplot: Filled polygons',
y=0.95)
fig.savefig('fill_example.png', facecolor='none')
plt.show()
def fill_example(ax):
# Use fill when you want a simple filled polygon between vertices
x, y = fill_data()
ax.fill(x, y, color='lightblue')
ax.margins(0.1)
example_utils.label(ax, 'fill')
def fill_between_example(ax):
# Fill between fills between two curves or a curve and a constant value
# It can be used in several ways. We'll illustrate a few below.
x, y1, y2 = sin_data()
# The most basic (and common) use of fill_between
err = np.random.rand(x.size)**2 + 0.1
y = 0.7 * x + 2
ax.fill_between(x, y + err, y - err, color='orange')
# Filling between two curves with different colors when they cross in
# different directions
ax.fill_between(x, y1, y2, where=y1>y2, color='lightblue')
ax.fill_between(x, y1, y2, where=y1<y2, color='forestgreen')
# Note that this is fillbetween*x*!
ax.fill_betweenx(x, -y1, where=y1>0, color='red', alpha=0.5)
ax.fill_betweenx(x, -y1, where=y1<0, color='blue', alpha=0.5)
ax.margins(0.15)
example_utils.label(ax, 'fill_between/x')
def stackplot_example(ax):
# Stackplot is equivalent to a series of ax.fill_between calls
x, y = stackplot_data()
ax.stackplot(x, y.cumsum(axis=0), alpha=0.5)
example_utils.label(ax, 'stackplot')
#-- Data generation ----------------------
def stackplot_data():
x = np.linspace(0, 10, 100)
y = np.random.normal(0, 1, (5, 100))
y = y.cumsum(axis=1)
y -= y.min(axis=0, keepdims=True)
return x, y
def sin_data():
x = np.linspace(0, 10, 100)
y = np.sin(x)
y2 = np.cos(x)
return x, y, y2
def fill_data():
t = np.linspace(0, 2*np.pi, 100)
r = np.random.normal(0, 1, 100).cumsum()
r -= r.min()
return r * np.cos(t), r * np.sin(t)
main()
| mit | -5,428,542,155,974,001,000 | 27.948052 | 76 | 0.623598 | false |
seninp/saxpy | saxpy/hotsax.py | 1 | 4860 | """Implements HOT-SAX."""
import numpy as np
from saxpy.znorm import znorm
from saxpy.sax import sax_via_window
from saxpy.distance import euclidean
def find_discords_hotsax(series, win_size=100, num_discords=2, alphabet_size=3,
paa_size=3, znorm_threshold=0.01, sax_type='unidim'):
"""HOT-SAX-driven discords discovery."""
discords = list()
global_registry = set()
# Z-normalized versions for every subsequence.
znorms = np.array([znorm(series[pos: pos + win_size], znorm_threshold) for pos in range(len(series) - win_size + 1)])
# SAX words for every subsequence.
sax_data = sax_via_window(series, win_size=win_size, paa_size=paa_size, alphabet_size=alphabet_size,
nr_strategy=None, znorm_threshold=0.01, sax_type=sax_type)
"""[2.0] build the 'magic' array"""
magic_array = list()
for k, v in sax_data.items():
magic_array.append((k, len(v)))
"""[2.1] sort it ascending by the number of occurrences"""
magic_array = sorted(magic_array, key=lambda tup: tup[1])
while len(discords) < num_discords:
best_discord = find_best_discord_hotsax(series, win_size, global_registry, sax_data, magic_array, znorms)
if -1 == best_discord[0]:
break
discords.append(best_discord)
mark_start = max(0, best_discord[0] - win_size + 1)
mark_end = best_discord[0] + win_size
for i in range(mark_start, mark_end):
global_registry.add(i)
return discords
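# Illustrative usage (names and parameter values are examples only):
#   series = some 1-D sequence of floats
#   discords = find_discords_hotsax(series, win_size=100, num_discords=2)
#   -> list of (position, nearest-neighbour distance) tuples, best discord first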
def find_best_discord_hotsax(series, win_size, global_registry, sax_data, magic_array, znorms):
"""Find the best discord with hotsax."""
"""[3.0] define the key vars"""
best_so_far_position = -1
best_so_far_distance = 0.
distance_calls = 0
    visit_array = np.zeros(len(series), dtype=int)
"""[4.0] and we are off iterating over the magic array entries"""
for entry in magic_array:
"""[5.0] current SAX words and the number of other sequences mapping to the same SAX word."""
curr_word = entry[0]
occurrences = sax_data[curr_word]
"""[6.0] jumping around by the same word occurrences makes it easier to
nail down the possibly small distance value -- so we can be efficient
and all that..."""
for curr_pos in occurrences:
if curr_pos in global_registry:
continue
"""[7.0] we don't want an overlapping subsequence"""
mark_start = curr_pos - win_size + 1
mark_end = curr_pos + win_size
visit_set = set(range(mark_start, mark_end))
"""[8.0] here is our subsequence in question"""
cur_seq = znorms[curr_pos]
"""[9.0] let's see what is NN distance"""
nn_dist = np.inf
do_random_search = True
"""[10.0] ordered by occurrences search first"""
for next_pos in occurrences:
"""[11.0] skip bad pos"""
if next_pos in visit_set:
continue
else:
visit_set.add(next_pos)
"""[12.0] distance we compute"""
dist = euclidean(cur_seq, znorms[next_pos])
distance_calls += 1
"""[13.0] keep the books up-to-date"""
if dist < nn_dist:
nn_dist = dist
if dist < best_so_far_distance:
do_random_search = False
break
"""[13.0] if not broken above,
we shall proceed with random search"""
if do_random_search:
"""[14.0] build that random visit order array"""
curr_idx = 0
for i in range(0, (len(series) - win_size + 1)):
if not(i in visit_set):
visit_array[curr_idx] = i
curr_idx += 1
it_order = np.random.permutation(visit_array[0:curr_idx])
curr_idx -= 1
"""[15.0] and go random"""
while curr_idx >= 0:
rand_pos = it_order[curr_idx]
curr_idx -= 1
dist = euclidean(cur_seq, znorms[rand_pos])
distance_calls += 1
"""[16.0] keep the books up-to-date again"""
if dist < nn_dist:
nn_dist = dist
if dist < best_so_far_distance:
nn_dist = dist
break
"""[17.0] and BIGGER books"""
if (nn_dist > best_so_far_distance) and (nn_dist < np.inf):
best_so_far_distance = nn_dist
best_so_far_position = curr_pos
return best_so_far_position, best_so_far_distance
| gpl-2.0 | 5,871,183,956,478,835,000 | 33.721429 | 121 | 0.531893 | false |
sighill/shade_app | apis/raw/017_raw/017_cleaner.py | 1 | 1278 | # -*- coding: utf-8 -*-
# 017_cleaner.py
# CODED TO BE EXECUTED SERVER SIDE :
# cd /home/common/shade
# python3 manage.py shell
import sys
from apis.voca import *
##################################
# Initialise paths and file names
AddLog('title' , 'Début du nettoyage du fichier')
work_dir = '/home/common/shade/apis/raw/017_raw/'
# Source file name
raw_file = 'src'
##################################
# Build the raw list
with open(work_dir + raw_file , 'r') as file:
raw_list = [i for i in file.read().splitlines()]
'''
##################################
# Text formatting
# Init the list holding StringFormatter's output
AddLog('subtitle' , 'Début de la fonction StringFormatter')
formatted_list = [StringFormatter(line) for line in raw_list]
##################################
# going through oddities finder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( formatted_list )
'''
ref_list = raw_list
##################################
# Write the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('/home/common/shade/apis/out/','017_src',ref_list,'AssetPlace;Pays clémentin , Ravénie , Lombrie') | mit | -8,009,218,853,207,783,000 | 30.641026 | 112 | 0.605822 | false |
reprah/shy | shy.py | 1 | 2992 | #!/usr/bin/env python
import sys, os, re, subprocess
# begin loop:
# - reading from stdin
# - forking a child
# - executing a new process in the child
def main():
while True:
sys.stdout.write(os.environ['PROMPT'])
line = sys.stdin.readline()
commands = split_on_pipes(line)
placeholder_in = sys.stdin
placeholder_out = sys.stdout
pipe = []
pids = []
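        # For every command except the last, a fresh os.pipe() connects its stdout
        # to the next command's stdin; the last command writes to the real stdout.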
for line in commands:
args = [expand(string) for string in line.split()]
command = args[0]
if command in BUILTINS:
# run built-in instead of doing fork + exec
run_builtin(command, args)
else:
# if command is not the last command
if (commands.index(line) + 1) < len(commands):
pipe = os.pipe() # file descriptors
placeholder_out = pipe[1]
else:
placeholder_out = sys.stdout
pid = fork_and_exec(command, args, placeholder_out, placeholder_in)
pids.append(pid)
if type(placeholder_out) is int:
os.close(placeholder_out)
if type(placeholder_in) is int:
os.close(placeholder_in)
if commands.index(line) > 0:
placeholder_in = pipe[0]
for id in pids:
wait_for_child(id)
def wait_for_child(pid):
try:
os.waitpid(pid, 0)
except:
None
# returns PID of child process
def fork_and_exec(command, args, placeholder_out, placeholder_in):
pid = os.fork()
if pid == 0: # inside child process
if type(placeholder_out) is int:
sys.stdout = os.fdopen(placeholder_out, 'w')
os.close(placeholder_out)
if type(placeholder_in) is int:
sys.stdin = os.fdopen(placeholder_in, 'r')
os.close(placeholder_in)
try:
os.execvp(command, args) # actual exec
except:
print "%s: command not found" % command
sys.exit(1) # exit child
return pid
def run_builtin(command, args):
try:
BUILTINS[command](args[1])
except:
BUILTINS[command]()
# returns an array of command strings
def split_on_pipes(line):
matches = re.findall("([^\"'|]+)|[\"']([^\"']+)[\"']", line)
commands = []
for match in matches:
for string in match:
if string != '':
commands.append(string.strip())
return commands
# support different types of expansion
def expand(string):
# variable expansion
if re.match("\$\w+", string):
return os.environ[string[1:]]
# arithmetic expansion
elif re.match("\$\(\(([\w\W\s]*)\)\)", string):
expr = re.match("\$\(\(([\w\W\s]*)\)\)", string).group(1)
return str(eval(expr))
# command expansion
elif re.match("\$\(([\w\W\s]*)\)", string):
expr = re.match("\$\(([\w\W\s]*)\)", string).group(1)
p = subprocess.Popen([expr], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# get the output of the command
out, _ = p.communicate()
return out[0:-1]
else:
return string
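# Examples (illustrative): expand("$HOME") -> value of HOME, expand("$((1+2))") -> "3",
# expand("$(date)") -> output of the date command; anything else is returned unchanged.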
def set(args):
key, value = args.split('=')
os.environ[key] = value
BUILTINS = {
'cd': lambda path: os.chdir(''.join(path)),
'exit': lambda exit_code=0: sys.exit(int(exit_code)),
'set': lambda args: set(args) # can't do variable assignment in Python lambda
}
os.environ['PROMPT'] = "=> "
main()
| mit | 7,146,024,717,470,157,000 | 22.559055 | 78 | 0.642714 | false |
csiro-rds/casda-samples | cutouts_by_proj.py | 1 | 7634 | #############################################################################################
#
# Python script to demonstrate interacting with CASDA's SODA implementation to
# retrieve cutout images around a list of sources.
#
# This script creates a job to produce and download cutouts from the specified image at
# the positions provided in an input file (each line has an RA and DEC).
#
# Author: James Dempsey on 16 Apr 2016
#
# Written for python 2.7
# Note: astropy is available on galaxy via 'module load astropy'
# On other machines, try Anaconda https://www.continuum.io/downloads
#
# Modified: MH on 18th Dec 2020
# Take in proj name in TAP query of images. Proj argument should be text snippet of the project name in obscore. e.g. EMU for EMU, Rapid for RACS.
# Also now does RA and Dec search in the TAP query of images (not just in the SODA cutout command).
#
# Modified:MH on 12th Apr
# Use s_region for image region. This may contain NaN pixel borders, but better representation of whether a point is within an image. (Previously used fixed radius from image centre).
#
# Example usage:
# python cutouts_by_proj.py OPAL-username Rapid mysources.txt racs_output 0.1
# For RACS cutouts, with list of positions in a file mysources.txt, and cutout radius 0.1 degrees.
#
#############################################################################################
from __future__ import print_function, division, unicode_literals
import argparse
import os
from astropy.io import votable
from astropy.coordinates import SkyCoord
from astropy import units
import casda
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(description="Download cutouts of specific locations from the specified image")
parser.add_argument("opal_username",
help="Your user name on the ATNF's online proposal system (normally an email address)")
parser.add_argument("-p", "--opal_password", help="Your password on the ATNF's online proposal system")
parser.add_argument("--password_file", help="The file holding your password for the ATNF's online proposal system")
parser.add_argument("proj", help="The text in project name, e.g. EMU, or Rapid ")
parser.add_argument("source_list_file",
help="The file holding the list of positions, with one RA and Dec pair per line.")
parser.add_argument("destination_directory", help="The directory where the resulting files will be stored")
parser.add_argument("radius", help="Radius, in degrees, of the cutouts")
args = parser.parse_args()
return args
def parse_sources_file(filename):
"""
Read in a file of sources, with one source each line. Each source is specified as a
right ascension and declination pair separated by space.
e.g.
1:34:56 -45:12:30
320.20 -43.5
:param filename: The name of the file contining the list of sources
:return: A list of SkyCoord objects representing the parsed sources.
"""
sourcelist = []
with open(filename, 'r') as f:
for line in f:
if line and line[0] != '#':
parts = line.split()
if len(parts) > 1:
if parts[0].find(':') > -1 or parts[0].find('h') > -1:
sky_loc = SkyCoord(parts[0], parts[1], frame='icrs',
unit=(units.hourangle, units.deg))
else:
sky_loc = SkyCoord(parts[0], parts[1], frame='icrs',
unit=(units.deg, units.deg))
sourcelist.append(sky_loc)
return sourcelist
def produce_cutouts(source_list, proj, username, password, destination_dir, cutout_radius_degrees):
    # Use CASDA VO (secure) to query for the images associated with the given project
    print ("\n\n** Retrieving image details for %s ... \n\n" % proj)
filename = destination_dir + str(proj) + ".xml"
#Do initial filter of images, allow for 3 deg cone around position (get ASKAP image which is ~30 sq deg).
src_num = 0
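    # For each source: find the images whose s_region contains the position via a
    # TAP query, fetch an authenticated datalink token per image, then submit one
    # async SODA cutout job and download its results before moving to the next source.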
for sky_loc in source_list:
src_num = src_num + 1
ra = sky_loc.ra.degree
dec = sky_loc.dec.degree
data_product_id_query = "select * from ivoa.obscore where obs_collection LIKE '%" + proj + \
"%' and dataproduct_subtype = 'cont.restored.t0' and pol_states = '/I/' and 1 = CONTAINS(POINT('ICRS',"+ str(ra) + ","+ str(dec) + "),s_region)"
casda.sync_tap_query(data_product_id_query, filename, username=username, password=password)
image_cube_votable = votable.parse(filename, pedantic=False)
results_array = image_cube_votable.get_table_by_id('results').array
# For each of the image cubes, query datalink to get the secure datalink details
print ("\n\n** Retrieving datalink for each image containing source number " + str(src_num) + " ...\n\n")
authenticated_id_tokens = []
for image_cube_result in results_array:
image_cube_id = image_cube_result['obs_publisher_did'].decode('utf-8')
async_url, authenticated_id_token = casda.get_service_link_and_id(image_cube_id, username,
password,
service='cutout_service',
destination_dir=destination_dir)
if authenticated_id_token is not None:
authenticated_id_tokens.append(authenticated_id_token)
if len(authenticated_id_tokens) == 0:
print ("No image cubes found")
return 1
# Create the async job
job_location = casda.create_async_soda_job(authenticated_id_tokens)
# For each entry in the results of the catalogue query, add the position filter as a parameter to the async job
cutout_filters = []
circle = "CIRCLE " + str(ra) + " " + str(dec) + " " + str(cutout_radius_degrees)
cutout_filters.append(circle)
casda.add_params_to_async_job(job_location, 'pos', cutout_filters)
# Run the job
status = casda.run_async_job(job_location)
# Download all of the files, or alert if it didn't complete
if status == 'COMPLETED':
print ("\n\n** Downloading results...\n\n")
casda.download_all(job_location, destination_dir)
returnflag = 0
else:
print ("Job did not complete: Status was %s." % status)
returnflag = 1
if returnflag == 0:
return 0
else:
return 1
def main():
args = parseargs()
password = casda.get_opal_password(args.opal_password, args.password_file)
# Change this to choose which environment to use, prod is the default
# casda.use_dev()
destination_dir = args.destination_directory + "/" + str(args.proj) + "/" # directory where files will be saved
# 1) Read in the list of sources
print ("\n\n** Parsing the source list ...\n")
source_list = parse_sources_file(args.source_list_file)
print ("\n** Read %d sources...\n\n" % (len(source_list)))
# 2) Create the destination directory
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
# Do the work
return produce_cutouts(source_list, args.proj, args.opal_username, password, destination_dir, args.radius)
if __name__ == '__main__':
exit(main())
| apache-2.0 | 4,055,497,068,719,523,000 | 44.712575 | 184 | 0.61344 | false |
tjhunter/phd-thesis-tjhunter | python/kdd/plot_network.py | 1 | 1065 |
__author__ = 'tjhunter'
import build
import json
import pylab as pl
from matplotlib.collections import LineCollection
# Draws the network as a pdf and SVG file.
def draw_network(ax, fd, link_style):
def decode_line(l):
#print l
dct = json.loads(l)
lats = dct['lats']
lons = dct['lons']
return zip(lons, lats)
lines = [decode_line(l) for l in fd]
#print lines
xmin = min([x for l in lines for x,y in l])
xmax = max([x for l in lines for x,y in l])
ymin = min([y for l in lines for x,y in l])
ymax = max([y for l in lines for x,y in l])
lc = LineCollection(lines, **link_style)
ax.add_collection(lc, autolim=True)
return ((xmin,xmax),(ymin,ymax))
fname = build.data_name('kdd/net_export_6.json')
fig = pl.figure("fig1",figsize=(10,10))
ax = fig.gca()
ax.set_axis_off()
style = {'colors':'k','linewidths':0.5}
with open(fname) as f:
(xlims, ylims) = draw_network(ax, f, style)
ax.set_xlim(*xlims)
ax.set_ylim(*ylims)
# Saving in pdf is a bit slow
build.save_figure(fig, 'figures-kdd/network_export_6',save_svg=True)
| apache-2.0 | -1,207,750,359,825,550,300 | 26.307692 | 68 | 0.656338 | false |
interalia/cmsplugin_availablejobs | availablejob/views.py | 1 | 3330 | from django.views.generic.simple import direct_to_template
from django.views.generic.list_detail import object_detail
from django.contrib.sites.models import Site
from django.shortcuts import get_object_or_404
from django.contrib import messages
from models import EnableOpening, Opening, Candidate
from forms import ApplyForm
import datetime
import hashlib
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
def __create_candidate(form,op,file):
name = form.cleaned_data["name"]
email = form.cleaned_data["email"]
phone = form.cleaned_data["phone"]
cv = form.cleaned_data["cv"]
uploaded_file(file)
candidate, created = Candidate.objects.get_or_create(name = name, email = email)
if op:
candidate.opening = op
candidate.phone = phone
candidate.cv = cv
candidate.save()
return candidate
def index(request):
eopen = EnableOpening.objects.all()
for i in eopen:
print i
form = ApplyForm()
post=False
ios=mobile(request)
d = {"opens": eopen,'form': form,'post':post,'mobile':ios}
if request.method == "POST":
form= ApplyForm(request.POST,request.FILES)
if form.is_valid():
post=True
d.update({"form":form,"post":post})
name = form.cleaned_data.get("opening")
opening = EnableOpening.objects.filter(opening__title = name)
'''
for i in opening:
__create_candidate(form,i,request.FILES['cv'])
'''
else:
d.update({"form":form,"post":post})
return direct_to_template(request, template="vacancy/index.html",extra_context=d)
def detail(request, id):
qs = EnableOpening.objects.all()
d = {"queryset": qs, 'object_id': int(id), 'template_name': "vacancy/opening_detail.html" }
return object_detail(request,**d)
def _process_cv(request, opening):
    applyform = ApplyForm()
    if request.method == "POST":
        form = ApplyForm(request.POST)
        if form.is_valid():
            vacante = form.save(commit=False)
            vacante.save()
            return direct_to_template(request, template = "vacancy/job_submit_success.html")
    # GET requests and invalid submissions both fall back to the blank form
    return direct_to_template(request, template = "vacancy/job_form.html")
def show_form(request, id):
opening = get_object_or_404(EnableOpening, id = id)
return _process_cv(request, opening)
def send_cv(request):
opening = None
return _process_cv(request,opening)
def facebook(request):
ops = EnableOpening.objects.all()
today = hashlib.md5(str(datetime.datetime.now()))
SITE = Site.objects.get_current()
d = {"enable_openings": ops, "today": today.hexdigest(), 'SITE': SITE}
return direct_to_template(request, "vacancy/facebook.html", extra_context = d)
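# Crude user-agent sniff: only iPhone/iPad clients are treated as mobile.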
def mobile(request):
device = {}
mobile=False
ua=request.META.get('HTTP_USER_AGENT','').lower()
if ua.find("iphone") > 0:
mobile= True
if ua.find("ipad") > 0:
mobile= True
if mobile:
return True
else:
return False
def uploaded_file(filename):
fd=open(settings.MEDIA_CV+str(filename),'wb+')
for chunk in filename.chunks():
fd.write(chunk)
fd.close()
| bsd-3-clause | 7,716,668,419,080,434,000 | 30.415094 | 95 | 0.63033 | false |
calio/cflow2dot | cflow2dot.py | 1 | 9602 | #!/usr/bin/env python
import os.path
import sys
import subprocess
import re
import argparse
import json
from sys import exit
from os import system
cflow_path = "/usr/local/bin/cflow"
dot_path = "/usr/local/bin/dot"
color = ["#eecc80", "#ccee80", "#80ccee", "#eecc80", "#80eecc"];
shape =["box", "ellipse", "octagon", "hexagon", "diamond"];
shape_len = len(shape)
pref = "cflow"
exts = ["svg", "png"]
index = {}
count = {}
stdlib = [
"assert", "isalnum", "isalpha", "iscntrl", "isdigit", "isgraph", "islower",
"isprint", "ispunct", "isspace", "isupper", "isxdigit", "toupper", "tolower",
"errno", "setlocale", "acos", "asin", "atan", "atan2", "ceil", "cos", "cosh",
"exp", "fabs", "floor", "fmod", "frexp", "ldexp", "log", "log10", "modf",
"pow", "sin", "sinh", "sqrt", "tan", "tanh", "stdlib.h", "setjmp", "longjmp",
"signal", "raise", "clearerr", "fclose", "feof", "fflush", "fgetc", "fgetpos",
"fgets", "fopen", "fprintf", "fputc", "fputs", "fread", "freopen", "fscanf",
"fseek", "fsetpos", "ftell", "fwrite", "getc", "getchar", "gets", "perror",
"printf", "putchar", "puts", "remove", "rewind", "scanf", "setbuf", "setvbuf",
"sprintf", "sscanf", "tmpfile", "tmpnam", "ungetc", "vfprintf", "vprintf",
"vsprintf", "abort", "abs", "atexit", "atof", "atoi", "atol", "bsearch",
"calloc", "div", "exit", "getenv", "free", "labs", "ldiv", "malloc", "mblen",
"mbstowcs", "mbtowc", "qsort", "rand", "realloc", "strtod", "strtol",
"strtoul", "srand", "system", "wctomb", "wcstombs", "memchr", "memcmp",
"memcpy", "memmove", "memset", "strcat", "strchr", "strcmp", "strcoll",
"strcpy", "strcspn", "strerror", "strlen", "strncat", "strncmp", "strncpy",
"strpbrk", "strrchr", "strspn", "strstr", "strtok", "strxfrm", "asctime",
"clock", "ctime", "difftime", "gmtime", "localtime", "mktime", "strftime",
"time","vsnprintf"]
pthreadlib = [
"pthread_atfork", "pthread_attr_destroy", "pthread_attr_getdetachstate",
"pthread_attr_getguardsize", "pthread_attr_getinheritsched",
"pthread_attr_getschedparam", "pthread_attr_getschedpolicy",
"pthread_attr_getscope", "pthread_attr_getstack", "pthread_attr_getstackaddr",
"pthread_attr_getstacksize", "pthread_attr_init",
"pthread_attr_setdetachstate", "pthread_attr_setguardsize",
"pthread_attr_setinheritsched", "pthread_attr_setschedparam",
"pthread_attr_setschedpolicy", "pthread_attr_setscope",
"pthread_attr_setstack", "pthread_attr_setstackaddr",
"pthread_attr_setstacksize", "pthread_barrier_destroy", "pthread_barrier_init",
"pthread_barrier_wait", "pthread_barrierattr_destroy",
"pthread_barrierattr_getpshared", "pthread_barrierattr_init",
"pthread_barrierattr_setpshared", "pthread_cancel", "pthread_cleanup_pop",
"pthread_cleanup_push", "pthread_cond_broadcast", "pthread_cond_destroy",
"pthread_cond_init", "pthread_cond_signal", "pthread_cond_timedwait",
"pthread_cond_wait", "pthread_condattr_destroy", "pthread_condattr_getclock",
"pthread_condattr_getpshared", "pthread_condattr_init",
"pthread_condattr_setclock", "pthread_condattr_setpshared", "pthread_create",
"pthread_detach", "pthread_equal", "pthread_exit", "pthread_getconcurrency",
"pthread_getcpuclockid", "pthread_getschedparam", "pthread_getspecific",
"pthread_join", "pthread_key_create", "pthread_key_delete", "pthread_kill",
"pthread_mutex_destroy", "pthread_mutex_getprioceiling", "pthread_mutex_init",
"pthread_mutex_lock", "pthread_mutex_setprioceiling",
"pthread_mutex_timedlock", "pthread_mutex_trylock", "pthread_mutex_unlock",
"pthread_mutexattr_destroy", "pthread_mutexattr_getprioceiling",
"pthread_mutexattr_getprotocol", "pthread_mutexattr_getpshared",
"pthread_mutexattr_gettype", "pthread_mutexattr_init",
"pthread_mutexattr_setprioceiling", "pthread_mutexattr_setprotocol",
"pthread_mutexattr_setpshared", "pthread_mutexattr_settype", "pthread_once",
"pthread_rwlock_destroy", "pthread_rwlock_init", "pthread_rwlock_rdlock",
"pthread_rwlock_timedrdlock", "pthread_rwlock_timedwrlock",
"pthread_rwlock_tryrdlock", "pthread_rwlock_trywrlock",
"pthread_rwlock_unlock", "pthread_rwlock_wrlock", "pthread_rwlockattr_destroy",
"pthread_rwlockattr_getpshared", "pthread_rwlockattr_init",
"pthread_rwlockattr_setpshared", "pthread_self", "pthread_setcancelstate",
"pthread_setcanceltype", "pthread_setconcurrency", "pthread_setschedparam",
"pthread_setschedprio", "pthread_setspecific", "pthread_sigmask",
"pthread_spin_destroy", "pthread_spin_init", "pthread_spin_lock",
"pthread_spin_trylock", "pthread_spin_unlock", "pthread_testcancel",
"pthread_setaffinity_np"
]
def get_parser():
ap = argparse.ArgumentParser(description="cflow2dot: generate call graph from C source code")
ap.add_argument("-e", "--exclude", metavar="symbols",
help="exclude these symbols (comma separated values) from output")
ap.add_argument("-m", "--main", metavar="NAME",
help="Assume main function to be called NAME")
ap.add_argument("-r", "--rank", default="LR", choices=["LR", "same"],
help="if rank is \"LR\", graph is left to right. If rank is \"same\", graph is top to bottom. Default value is \"LR\".")
ap.add_argument("-v", "--verbose", action="store_true",
help="increase verbosity level")
ap.add_argument("--no", metavar="NAME", action="append",
help="exclude NAME symbol set (configured in ~/.cflowdotrc) from output")
ap.add_argument("--no-pthreadlib", action="store_true",
help="exclude pthread lib symbols from output")
ap.add_argument("--no-stdlib", action="store_true",
help="exclude C stdlib symbols from output")
ap.add_argument("cflow_args", nargs=argparse.REMAINDER,
help="arguments that are passed to cflow")
return ap
def call_cflow(opts):
args = opts.cflow_args
args.insert(0, cflow_path)
args.insert(1, "-l")
if opts.main:
args.insert(1, "-m")
args.insert(2, opts.main)
if opts.verbose:
print "calling cflow with args: ", args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if stderr and not stdout:
exit(stderr)
return stdout
def build_excludes(opts):
res = {}
if opts.exclude:
exclude_symbols = opts.exclude.split(",")
for v in exclude_symbols:
res[v] = True
if opts.no_stdlib:
for v in stdlib:
res[v] = True
if opts.no_pthreadlib:
for v in pthreadlib:
res[v] = True
if opts.no:
rcfile = os.path.expanduser("~") + "/.cflow2dotrc"
print(rcfile)
if not os.path.isfile(rcfile):
print("no ~/.cflow2dotrc file found, --no argument is skipped")
return res
else:
fp = open(rcfile)
rcdata = json.load(fp)
for exclude_set in opts.no:
if rcdata.get(exclude_set):
for func_name in rcdata[exclude_set]:
res[func_name] = True
else:
print("no key \"" + exclude_set + "\" specified in " + rcfile)
fp.close()
return res
def get_output(opts, res):
output = []
skip = False
exclude_index = -1
lines = res.split('\n')
verbose = opts.verbose
exclude = build_excludes(opts)
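    # cflow -l prefixes every call with its nesting depth; `index` remembers the
    # most recent function seen at each depth, so an edge is drawn from the
    # function at depth n-1 to the current function at depth n.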
for line in lines:
if line == '':
continue
line = re.sub("\(.*$", "", line)
line = re.sub("^\{\s*", "", line)
line = re.sub("\}\s*", "\t", line)
parts = line.split("\t")
# indent level
n = parts[0]
# function name of callee
f = parts[1]
index[n] = f
# test if callee is in exclude list
if skip:
# exclude all sub function calls from the excluded function. If we
# get another callee at the same indent level, then stop skipping
if int(n) > int(exclude_index):
if verbose:
print("exclude sub function: " + f)
continue
else:
skip = False
exclude_index = -1
if f in exclude:
skip = True
exclude_index = n
if verbose:
print("exclude function: " + f)
continue
if n != '0':
s = "%s->%s" % (index[str(int(n) - 1)], f)
if s not in count:
output.append("node [color=\"{0}\" shape={1}];edge [color=\"{2}\"];\n{3}\n".format(color[int(n) % shape_len], shape[int(n) % shape_len], color[int(n) % shape_len], s))
count[s] = True
else:
output.append("%s [shape=box];\n" % f)
output.insert(0, "digraph G {\nnode [peripheries=2 style=\"filled,rounded\" fontname=\"Vera Sans YuanTi Mono\" color=\"%s\"];\nrankdir=%s;\nlabel=\"%s\"\n" % (color[0], opts.rank, opts.cflow_args[2]))
output.append("}\n")
return output
def write_output(output):
f = open(pref + ".dot", "w")
f.write(''.join(output))
f.close()
print("dot output to %s.dot" % pref)
if os.path.isfile(dot_path):
for ext in exts:
system("dot -T%s %s.dot -o %s.%s" % (ext, pref, pref, ext))
print("%s output to %s.%s" % (ext, pref, ext))
else:
print("'dot(GraphViz)' not installed.")
if __name__ == '__main__':
ap = get_parser()
opts = ap.parse_args()
if not os.path.isfile(cflow_path):
exit('cflow not found on: %s' % cflow_path)
res = call_cflow(opts)
output = get_output(opts, res)
write_output(output)
| mit | -4,698,164,425,426,742,000 | 39.686441 | 204 | 0.613622 | false |
kokosowy/vuadek | vuadek.py | 1 | 1180 | #!/usr/bin/python3.4
import sys
import os
import subprocess
zm_home = os.path.expanduser("~")
zm_pth_workdir = zm_home+"/.vuadek/"
if not os.path.exists(zm_pth_workdir):
os.makedirs(zm_pth_workdir)
zm_fl_remains = zm_pth_workdir+"remains"
pathname = os.path.dirname(sys.argv[1])
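# First run: let uade123 --scan list the playable modules under the target directory
# and cache that list in ~/.vuadek/remains; each cached file is then converted to wav
# with uade123 and encoded to mp3 with lame next to the original. (The Polish print
# messages below mean "does not exist" / "exists", referring to the cache file.)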
if not os.path.isfile(zm_fl_remains):
print('nie istnieje')
scan_result = subprocess.Popen(["uade123", "--scan", os.path.abspath(pathname)],stdout=subprocess.PIPE)
with open(zm_fl_remains, 'w') as f:
for line in scan_result.stdout:
f.write(line.decode('utf-8'))
f.closed
print('istnieje')
with open(zm_fl_remains, 'r') as f:
zm_input = [line.rstrip('\n') for line in f]
for item in zm_input:
head, tail = os.path.split(item)
subprocess.call(["uade123", "--detect-format-by-content", "-f",zm_pth_workdir+tail+'.wav', "--filter=A1200", "--normalise", "--speed-hack", "-v", "--headphones", item])
subprocess.call(["lame", "--verbose", "--preset", "standard", zm_pth_workdir+tail+'.wav', head+'/'+tail+'.mp3'])
subprocess.call(["rm", zm_pth_workdir+tail+'.wav'])
f.closed
#call(["lame", "--verbose", "--preset", "standard", zm_output, zm_mp3])
#call(["rm", zm_output])
| gpl-2.0 | -4,554,903,689,397,167,600 | 29.25641 | 170 | 0.65678 | false |
teamosceola/bitbake | lib/bb/ui/knotty.py | 1 | 12691 | #
# BitBake (No)TTY UI Implementation
#
# Handling output to TTYs or files (no TTY)
#
# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division
import os
import sys
import xmlrpclib
import logging
import progressbar
import bb.msg
from bb.ui import uihelper
logger = logging.getLogger("BitBake")
interactive = sys.stdout.isatty()
class BBProgress(progressbar.ProgressBar):
def __init__(self, msg, maxval):
self.msg = msg
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
progressbar.ETA()]
progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets)
class NonInteractiveProgress(object):
fobj = sys.stdout
def __init__(self, msg, maxval):
self.msg = msg
self.maxval = maxval
def start(self):
self.fobj.write("%s..." % self.msg)
self.fobj.flush()
return self
def update(self, value):
pass
def finish(self):
self.fobj.write("done.\n")
self.fobj.flush()
def new_progress(msg, maxval):
if interactive:
return BBProgress(msg, maxval)
else:
return NonInteractiveProgress(msg, maxval)
def pluralise(singular, plural, qty):
if(qty == 1):
return singular % qty
else:
return plural % qty
def main(server, eventHandler):
# Get values of variables which control our output
includelogs = server.runCommand(["getVariable", "BBINCLUDELOGS"])
loglines = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
consolelogfile = server.runCommand(["getVariable", "BB_CONSOLELOG"])
helper = uihelper.BBUIHelper()
console = logging.StreamHandler(sys.stdout)
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
bb.msg.addDefaultlogFilter(console)
console.setFormatter(format)
logger.addHandler(console)
if consolelogfile:
consolelog = logging.FileHandler(consolelogfile)
bb.msg.addDefaultlogFilter(consolelog)
consolelog.setFormatter(format)
logger.addHandler(consolelog)
try:
cmdline = server.runCommand(["getCmdLineAction"])
if not cmdline:
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
return 1
elif not cmdline['action']:
print(cmdline['msg'])
return 1
ret = server.runCommand(cmdline['action'])
if ret != True:
print("Couldn't get default commandline! %s" % ret)
return 1
except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x)
return 1
parseprogress = None
cacheprogress = None
shutdown = 0
interrupted = False
return_value = 0
errors = 0
warnings = 0
taskfailures = []
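    # Main event loop: drain events from the server, route log records and progress
    # events to the console, and exit once a CommandCompleted/CookerExit (or a second
    # Ctrl-C) has pushed `shutdown` past 1 and the event queue runs dry.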
while True:
try:
event = eventHandler.waitEvent(0.25)
if event is None:
if shutdown > 1:
break
continue
helper.eventHandler(event)
if isinstance(event, bb.runqueue.runQueueExitWait):
if not shutdown:
shutdown = 1
if shutdown and helper.needUpdate:
activetasks, failedtasks = helper.getTasks()
if activetasks:
print("Waiting for %s active tasks to finish:" % len(activetasks))
for tasknum, task in enumerate(activetasks):
print("%s: %s (pid %s)" % (tasknum, activetasks[task]["title"], task))
if isinstance(event, logging.LogRecord):
if event.levelno >= format.ERROR:
errors = errors + 1
return_value = 1
elif event.levelno == format.WARNING:
warnings = warnings + 1
# For "normal" logging conditions, don't show note logs from tasks
# but do show them if the user has changed the default log level to
# include verbose/debug messages
if event.taskpid != 0 and event.levelno <= format.NOTE:
continue
logger.handle(event)
continue
if isinstance(event, bb.build.TaskFailed):
return_value = 1
logfile = event.logfile
if logfile and os.path.exists(logfile):
print("ERROR: Logfile of failure stored in: %s" % logfile)
if includelogs and not event.errprinted:
print("Log data follows:")
f = open(logfile, "r")
lines = []
while True:
l = f.readline()
if l == '':
break
l = l.rstrip()
if loglines:
lines.append(' | %s' % l)
if len(lines) > int(loglines):
lines.pop(0)
else:
print('| %s' % l)
f.close()
if lines:
for line in lines:
print(line)
if isinstance(event, bb.build.TaskBase):
logger.info(event._message)
continue
if isinstance(event, bb.event.ParseStarted):
if event.total == 0:
continue
parseprogress = new_progress("Parsing recipes", event.total).start()
continue
if isinstance(event, bb.event.ParseProgress):
parseprogress.update(event.current)
continue
if isinstance(event, bb.event.ParseCompleted):
if not parseprogress:
continue
parseprogress.finish()
print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
% ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
continue
if isinstance(event, bb.event.CacheLoadStarted):
cacheprogress = new_progress("Loading cache", event.total).start()
continue
if isinstance(event, bb.event.CacheLoadProgress):
cacheprogress.update(event.current)
continue
if isinstance(event, bb.event.CacheLoadCompleted):
cacheprogress.finish()
print("Loaded %d entries from dependency cache." % event.num_entries)
continue
if isinstance(event, bb.command.CommandFailed):
return_value = event.exitcode
errors = errors + 1
logger.error("Command execution failed: %s", event.error)
shutdown = 2
continue
if isinstance(event, bb.command.CommandExit):
if not return_value:
return_value = event.exitcode
continue
if isinstance(event, (bb.command.CommandCompleted, bb.cooker.CookerExit)):
shutdown = 2
continue
if isinstance(event, bb.event.MultipleProviders):
logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
event._item,
", ".join(event._candidates))
logger.info("consider defining a PREFERRED_PROVIDER entry to match %s", event._item)
continue
if isinstance(event, bb.event.NoProvider):
return_value = 1
errors = errors + 1
if event._runtime:
r = "R"
else:
r = ""
if event._dependees:
logger.error("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)", r, event._item, ", ".join(event._dependees), r)
else:
logger.error("Nothing %sPROVIDES '%s'", r, event._item)
if event._reasons:
for reason in event._reasons:
logger.error("%s", reason)
continue
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
logger.info("Running setscene task %d of %d (%s)" % (event.stats.completed + event.stats.active + event.stats.failed + 1, event.stats.total, event.taskstring))
continue
if isinstance(event, bb.runqueue.runQueueTaskStarted):
if event.noexec:
tasktype = 'noexec task'
else:
tasktype = 'task'
logger.info("Running %s %s of %s (ID: %s, %s)",
tasktype,
event.stats.completed + event.stats.active +
event.stats.failed + 1,
event.stats.total, event.taskid, event.taskstring)
continue
if isinstance(event, bb.runqueue.runQueueTaskFailed):
taskfailures.append(event.taskstring)
logger.error("Task %s (%s) failed with exit code '%s'",
event.taskid, event.taskstring, event.exitcode)
continue
if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
logger.warn("Setscene task %s (%s) failed with exit code '%s' - real task will be run instead",
event.taskid, event.taskstring, event.exitcode)
continue
# ignore
if isinstance(event, (bb.event.BuildBase,
bb.event.StampUpdate,
bb.event.ConfigParsed,
bb.event.RecipeParsed,
bb.event.RecipePreFinalise,
bb.runqueue.runQueueEvent,
bb.runqueue.runQueueExitWait,
bb.event.OperationStarted,
bb.event.OperationCompleted,
bb.event.OperationProgress)):
continue
logger.error("Unknown event: %s", event)
except EnvironmentError as ioerror:
# ignore interrupted io
if ioerror.args[0] == 4:
pass
except KeyboardInterrupt:
if shutdown == 1:
print("\nSecond Keyboard Interrupt, stopping...\n")
server.runCommand(["stateStop"])
if shutdown == 0:
interrupted = True
print("\nKeyboard Interrupt, closing down...\n")
server.runCommand(["stateShutdown"])
shutdown = shutdown + 1
pass
summary = ""
if taskfailures:
summary += pluralise("\nSummary: %s task failed:",
"\nSummary: %s tasks failed:", len(taskfailures))
for failure in taskfailures:
summary += "\n %s" % failure
if warnings:
summary += pluralise("\nSummary: There was %s WARNING message shown.",
"\nSummary: There were %s WARNING messages shown.", warnings)
if return_value:
summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
"\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
if summary:
print(summary)
if interrupted:
print("Execution was interrupted, returning a non-zero exit code.")
if return_value == 0:
return_value = 1
return return_value
| gpl-2.0 | -854,224,645,206,136,700 | 38.908805 | 175 | 0.532897 | false |
superdesk/eve | eve/tests/renders.py | 1 | 11071 | # -*- coding: utf-8 -*-
from eve.tests import TestBase
from eve.utils import api_prefix
from eve.tests.test_settings import MONGO_DBNAME
import simplejson as json
class TestRenders(TestBase):
def test_default_render(self):
r = self.test_client.get('/')
self.assertEqual(r.content_type, 'application/json')
def test_json_render(self):
r = self.test_client.get('/', headers=[('Accept', 'application/json')])
self.assertEqual(r.content_type, 'application/json')
def test_xml_render(self):
r = self.test_client.get('/', headers=[('Accept', 'application/xml')])
self.assertTrue('application/xml' in r.content_type)
def test_xml_url_escaping(self):
r = self.test_client.get('%s?max_results=1' % self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assertTrue(b'&' in r.get_data())
def test_xml_leaf_escaping(self):
# test that even xml leaves content is being properly escaped
# We need to assign a `person` to our test invoice
_db = self.connection[MONGO_DBNAME]
fake_contact = self.random_contacts(1)
fake_contact[0]['ref'] = "12345 & 67890"
fake_contact_id = _db.contacts.insert(fake_contact)[0]
r = self.test_client.get('%s/%s' %
(self.known_resource_url, fake_contact_id),
headers=[('Accept', 'application/xml')])
self.assertTrue(b'12345 & 6789' in r.get_data())
def test_xml_ordered_nodes(self):
""" Test that xml nodes are ordered and #441 is addressed.
"""
r = self.test_client.get('%s?max_results=1' % self.known_resource_url,
headers=[('Accept', 'application/xml')])
data = r.get_data()
idx1 = data.index(b'_created')
idx2 = data.index(b'_etag')
idx3 = data.index(b'_id')
idx4 = data.index(b'_updated')
self.assertTrue(idx1 < idx2 < idx3 < idx4)
idx1 = data.index(b'max_results')
idx2 = data.index(b'page')
idx3 = data.index(b'total')
self.assertTrue(idx1 < idx2 < idx3)
idx1 = data.index(b'last')
idx2 = data.index(b'next')
idx3 = data.index(b'parent')
self.assertTrue(idx1 < idx2 < idx3)
def test_unknown_render(self):
r = self.test_client.get('/', headers=[('Accept', 'application/html')])
self.assertEqual(r.content_type, 'application/json')
def test_json_xml_disabled(self):
self.app.config['JSON'] = False
self.app.config['XML'] = False
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assert500(r.status_code)
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assert500(r.status_code)
r = self.test_client.get(self.known_resource_url)
self.assert500(r.status_code)
def test_json_disabled(self):
self.app.config['JSON'] = False
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assertTrue('application/xml' in r.content_type)
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assertTrue('application/xml' in r.content_type)
r = self.test_client.get(self.known_resource_url)
self.assertTrue('application/xml' in r.content_type)
def test_xml_disabled(self):
self.app.config['XML'] = False
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/xml')])
self.assertEqual(r.content_type, 'application/json')
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assertEqual(r.content_type, 'application/json')
r = self.test_client.get(self.known_resource_url)
self.assertEqual(r.content_type, 'application/json')
def test_json_keys_sorted(self):
self.app.config['JSON_SORT_KEYS'] = True
r = self.test_client.get(self.known_resource_url,
headers=[('Accept', 'application/json')])
self.assertEqual(
json.dumps(json.loads(r.get_data()), sort_keys=True).encode(),
r.get_data()
)
def test_CORS(self):
# no CORS headers if Origin is not provided with the request.
r = self.test_client.get('/')
self.assertFalse('Access-Control-Allow-Origin' in r.headers)
self.assertFalse('Access-Control-Allow-Methods' in r.headers)
self.assertFalse('Access-Control-Allow-Max-Age' in r.headers)
self.assertFalse('Access-Control-Expose-Headers' in r.headers)
self.assert200(r.status_code)
# test that if X_DOMAINS is set to '*', then any Origin value is
# allowed. Also test that only the Origin header included with the
        # request will be returned back to the client.
self.app.config['X_DOMAINS'] = '*'
r = self.test_client.get('/', headers=[('Origin',
'http://example.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'],
'http://example.com')
self.assertEqual(r.headers['Vary'], 'Origin')
# test that if a list is set for X_DOMAINS, then:
# 1. only list values are accepted;
# 2. only the value included with the request is returned back.
self.app.config['X_DOMAINS'] = ['http://1of2.com', 'http://2of2.com']
r = self.test_client.get('/', headers=[('Origin', 'http://1of2.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'],
'http://1of2.com')
r = self.test_client.get('/', headers=[('Origin', 'http://2of2.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'],
'http://2of2.com')
r = self.test_client.get('/', headers=[('Origin',
'http://notreally.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'], '')
# other Access-Control-Allow- headers are included.
self.assertTrue('Access-Control-Allow-Headers' in r.headers)
self.assertTrue('Access-Control-Allow-Methods' in r.headers)
self.assertTrue('Access-Control-Allow-Max-Age' in r.headers)
self.assertTrue('Access-Control-Expose-Headers' in r.headers)
def test_CORS_MAX_AGE(self):
self.app.config['X_DOMAINS'] = '*'
r = self.test_client.get('/', headers=[('Origin',
'http://example.com')])
self.assertEqual(r.headers['Access-Control-Allow-Max-Age'],
'21600')
self.app.config['X_MAX_AGE'] = 2000
r = self.test_client.get('/', headers=[('Origin',
'http://example.com')])
self.assertEqual(r.headers['Access-Control-Allow-Max-Age'],
'2000')
def test_CORS_OPTIONS(self, url='/', methods=None):
if methods is None:
methods = []
r = self.test_client.open(url, method='OPTIONS')
self.assertFalse('Access-Control-Allow-Origin' in r.headers)
self.assertFalse('Access-Control-Allow-Methods' in r.headers)
self.assertFalse('Access-Control-Allow-Max-Age' in r.headers)
self.assertFalse('Access-Control-Expose-Headers' in r.headers)
self.assert200(r.status_code)
# test that if X_DOMAINS is set to '*', then any Origin value is
# allowed. Also test that only the Origin header included with the
        # request will be returned back to the client.
self.app.config['X_DOMAINS'] = '*'
r = self.test_client.open(url, method='OPTIONS',
headers=[('Origin', 'http://example.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'],
'http://example.com')
self.assertEqual(r.headers['Vary'], 'Origin')
for m in methods:
self.assertTrue(m in r.headers['Access-Control-Allow-Methods'])
self.app.config['X_DOMAINS'] = ['http://1of2.com', 'http://2of2.com']
r = self.test_client.open(url, method='OPTIONS',
headers=[('Origin', 'http://1of2.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'],
'http://1of2.com')
r = self.test_client.open(url, method='OPTIONS',
headers=[('Origin', 'http://2of2.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'],
'http://2of2.com')
for m in methods:
self.assertTrue(m in r.headers['Access-Control-Allow-Methods'])
self.assertTrue('Access-Control-Allow-Origin' in r.headers)
self.assertTrue('Access-Control-Allow-Max-Age' in r.headers)
self.assertTrue('Access-Control-Expose-Headers' in r.headers)
r = self.test_client.get(url, headers=[('Origin',
'http://not_an_example.com')])
self.assert200(r.status_code)
self.assertEqual(r.headers['Access-Control-Allow-Origin'], '')
for m in methods:
self.assertTrue(m in r.headers['Access-Control-Allow-Methods'])
def test_CORS_OPTIONS_resources(self):
prefix = api_prefix(self.app.config['URL_PREFIX'],
self.app.config['API_VERSION'])
del(self.domain['peopleinvoices'])
del(self.domain['peoplesearches'])
del(self.domain['internal_transactions'])
for _, settings in self.app.config['DOMAIN'].items():
# resource endpoint
url = '%s/%s/' % (prefix, settings['url'])
methods = settings['resource_methods'] + ['OPTIONS']
self.test_CORS_OPTIONS(url, methods)
def test_CORS_OPTIONS_item(self):
prefix = api_prefix(self.app.config['URL_PREFIX'],
self.app.config['API_VERSION'])
url = '%s%s' % (prefix, self.item_id_url)
methods = (self.domain[self.known_resource]['resource_methods'] +
['OPTIONS'])
self.test_CORS_OPTIONS(url, methods)
url = '%s%s/%s' % (prefix, self.known_resource_url, self.item_ref)
methods = ['GET', 'OPTIONS']
self.test_CORS_OPTIONS(url, methods)
| bsd-3-clause | -1,690,569,143,142,862,800 | 44.937759 | 79 | 0.571583 | false |
MaxLinCode/tardy-HackIllinois-2017 | alexa/lambda_function.py | 1 | 6803 | """
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Built-in Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
from twilio.rest import TwilioRestClient
from loadData import rawToTime, getNumber
from config import *
accountSid = 'ACcf54ef49063aaa784c99aec82d7f16c2'
authToken = '31f817a48ee7cd461c07c57490eac6ce'
fromNumber = '19163183442'
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
session_attributes = {}
card_title = "Welcome"
speech_output = "Hello, welcome to the Tardy skill."
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "You can ask me to send a message to your friends."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def sarah_intent_handler(intent):
card_title = "Sarah"
speech_output = "Sarah is the best"
return build_response(None, build_speechlet_response(
card_title, speech_output, None, False))
def formatMessage(userName, targetName, time, place):
return "Hello %s, %s would like to meet at %s at %s." % (targetName.title(), userName.title(), place.title(), time)
def getInfo(userId, target, time, place):
d = {}
time = rawToTime(time)
userName = ""
for x in target:
        arr = getNumber(userId, x)
        if userName == "":
            userName = arr[0]
        d[arr[1]] = [arr[2], formatMessage(userName, arr[1], time, place)]
for key in d:
sendText(d[key][0], d[key][1])
def twilio_intent_handler(intent):
card_title = "Twilio"
#print(intent['slots'])
target = intent["slots"]["nameSlot"]["value"]
time = intent["slots"]["timeSlot"]["value"]
place = intent["slots"]["placeSlot"]["value"]
#userID = kijZjJJ5ozPZxfeHYfjh3zd3TUh1
getInfo('kijZjJJ5ozPZxfeHYfjh3zd3TUh1', target, time, place)
#cellNumber = ""
#messageText = ""
#slots = intent['slots']
#cellNumber = slots['numberSlot']['value']
#messageText = slots['msgSlot']['value']
# call the method to send text
speech_output = "Message sent."
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(None, build_speechlet_response(
card_title, speech_output, None, False))
#number,message
def sendText(to_num, msg_text):
try:
client = TwilioRestClient(accountSid, authToken)
client.messages.create(
to=to_num,
            from_=fromNumber,
body=msg_text
)
return True
except Exception as e:
print("Failed to send message: ")
print(e.code)
return False
def help_intent_handler(intent):
card_title = "Help"
speech_output = "Ask me to send someone a text."
return build_response(None, build_speechlet_response(
card_title, speech_output, None, False))
def misunderstood_handler(intent):
card_title = "Misunderstood"
speech_output = "Sorry, please try again."
return build_response(None, build_speechlet_response(
card_title, speech_output, None, True))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying our Tardy skill. " \
"Have a great time at Hack Illinois! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response(None, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Events ------------------
def on_launch(launch_request):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "SarahIntent":
return sarah_intent_handler(intent)
elif intent_name == "TwilioIntent":
return twilio_intent_handler(intent)
elif intent_name == "HelpIntent":
return help_intent_handler(intent)
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
return misunderstood_handler(intent)
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
session_attributes = {}
#applicationId = event['session']['application']['applicationId']
#if applicationId != TWILIO_APPLICATION_ID:
# should_end_session = True
# bad_request_output = "Bad Request"
# print("Bad ApplicationId Received: "+applicationId)
# return build_response(session_attributes, build_speechlet_response("Twilio", bad_request_output, None, should_end_session))
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request']) | mit | 3,407,677,043,786,212,400 | 32.850746 | 132 | 0.641482 | false |
quru/qis | src/imageserver/errors.py | 1 | 2593 | #
# Quru Image Server
#
# Document: errors.py
# Date started: 31 Mar 2011
# By: Matt Fozard
# Purpose: Internal errors and exceptions
# Requires:
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
#
class ImageError(ValueError):
"""
An error resulting from an invalid or unsupported imaging operation.
"""
pass
class AlreadyExistsError(ValueError):
"""
An error resulting from a duplicate value or an attempt to create an
object that already exists.
"""
pass
class DoesNotExistError(ValueError):
"""
An error resulting from an attempt to use an object that does not exist.
"""
pass
class SecurityError(Exception):
"""
An error resulting from some unauthorised action.
"""
pass
class StartupError(Exception):
"""
An error that should prevent server startup.
"""
pass
class AuthenticationError(Exception):
"""
An error resulting from a failure to authenticate.
"""
pass
class DBError(Exception):
"""
An error resulting from a database operation.
Adds an optional extra 'sql' attribute.
"""
def __init__(self, message, sql=None):
Exception.__init__(self, message)
self.sql = sql if sql is not None else ''
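# Usage sketch (illustrative only; the SQL text is a made-up value):
#   raise DBError("image insert failed", sql="INSERT INTO images (...) VALUES (...)")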
class DBDataError(DBError):
"""
An error resulting from incorrect database data.
"""
pass
class ParameterError(ValueError):
"""
An error resulting from an invalid parameter value.
"""
pass
class TimeoutError(RuntimeError):
"""
An error resulting from an operation timeout.
"""
pass
class ServerTooBusyError(RuntimeError):
"""
Raised when the server is too busy to service a request.
"""
pass
| agpl-3.0 | 6,222,362,547,311,885,000 | 22.36036 | 79 | 0.649055 | false |
lukas-bednar/python-rrmngmnt | rrmngmnt/ssh.py | 1 | 10209 | import os
import time
import socket
import paramiko
import contextlib
import subprocess
from rrmngmnt.executor import Executor
AUTHORIZED_KEYS = os.path.join("%s", ".ssh/authorized_keys")
KNOWN_HOSTS = os.path.join("%s", ".ssh/known_hosts")
ID_RSA_PUB = os.path.join("%s", ".ssh/id_rsa.pub")
ID_RSA_PRV = os.path.join("%s", ".ssh/id_rsa")
CONNECTIVITY_TIMEOUT = 600
CONNECTIVITY_SAMPLE_TIME = 20
class RemoteExecutor(Executor):
"""
Any resource which provides SSH service.
    This class is meant to replace our current utilities.machine.LinuxMachine
    class. It gives you lower-level access to the ssh communication:
    live interaction with the streams, no more collapsing results into
    True/False, and no more mixing stdout with stderr.
    You can still use the 'run_cmd' method if you don't care about that,
    but the recommended way to work is:
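    A minimal sketch of that style (illustrative only: the address, command
    and file path are made-up values, and 'user' is an rrmngmnt User):
        executor = RemoteExecutor(user, '10.0.0.1')
        with executor.session() as ss:
            rc, out, err = ss.run_cmd(['ls', '-l'])
            with ss.open_file('/etc/hosts') as fh:
                data = fh.read()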
"""
TCP_TIMEOUT = 10.0
class LoggerAdapter(Executor.LoggerAdapter):
"""
Makes sure that all logs which are done via this class, has
appropriate prefix. [user@IP/password]
"""
def process(self, msg, kwargs):
return (
"[%s@%s/%s] %s" % (
self.extra['self'].user.name,
self.extra['self'].address,
self.extra['self'].user.password,
msg,
),
kwargs,
)
class Session(Executor.Session):
"""
Represents active ssh connection
"""
def __init__(self, executor, timeout=None, use_pkey=False):
super(RemoteExecutor.Session, self).__init__(executor)
if timeout is None:
timeout = RemoteExecutor.TCP_TIMEOUT
self._timeout = timeout
self._ssh = paramiko.SSHClient()
self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if use_pkey:
self.pkey = paramiko.RSAKey.from_private_key_file(
ID_RSA_PRV % os.path.expanduser('~')
)
self._executor.user.password = None
else:
self.pkey = None
def __exit__(self, type_, value, tb):
if type_ is socket.timeout:
self._update_timeout_exception(value)
try:
self.close()
except Exception as ex:
if type_ is None:
raise
else:
self._executor.logger.debug(
"Can not close ssh session %s", ex,
)
def open(self):
self._ssh.get_host_keys().clear()
try:
self._ssh.connect(
self._executor.address,
username=self._executor.user.name,
password=self._executor.user.password,
timeout=self._timeout,
pkey=self.pkey
)
except (socket.gaierror, socket.herror) as ex:
args = list(ex.args)
message = "%s: %s" % (self._executor.address, args[1])
args[1] = message
ex.strerror = message
ex.args = tuple(args)
raise
except socket.timeout as ex:
self._update_timeout_exception(ex)
raise
def close(self):
self._ssh.close()
def _update_timeout_exception(self, ex, timeout=None):
if getattr(ex, '_updated', False):
return
if timeout is None:
timeout = self._timeout
message = "%s: timeout(%s)" % (
self._executor.address, timeout
)
ex.args = (message,)
ex._updated = True
def command(self, cmd):
return RemoteExecutor.Command(cmd, self)
def run_cmd(self, cmd, input_=None, timeout=None):
cmd = self.command(cmd)
return cmd.run(input_, timeout)
@contextlib.contextmanager
def open_file(self, path, mode='r', bufsize=-1):
with contextlib.closing(self._ssh.open_sftp()) as sftp:
with contextlib.closing(
sftp.file(
path,
mode,
bufsize,
)
) as fh:
yield fh
class Command(Executor.Command):
"""
This class holds all data related to command execution.
- the command itself
- stdout/stderr streams
- out/err string which were produced by command
- returncode the exit status of command
"""
def __init__(self, cmd, session):
super(RemoteExecutor.Command, self).__init__(
subprocess.list2cmdline(cmd),
session,
)
self._in = None
self._out = None
self._err = None
def get_rc(self, wait=False):
if self._rc is None:
if self._out is not None:
if self._out.channel.exit_status_ready() or wait:
self._rc = self._out.channel.recv_exit_status()
return self._rc
@contextlib.contextmanager
def execute(self, bufsize=-1, timeout=None, get_pty=False):
"""
This method allows you to work directly with streams.
with cmd.execute() as in_, out, err:
# where in_, out and err are file-like objects
# where you can read data from these
"""
try:
self.logger.debug("Executing: %s", self.cmd)
self._in, self._out, self._err = self._ss._ssh.exec_command(
self.cmd,
bufsize=bufsize,
timeout=timeout,
get_pty=get_pty,
)
yield self._in, self._out, self._err
self.get_rc(True)
except socket.timeout as ex:
self._ss._update_timeout_exception(ex, timeout)
raise
finally:
if self._in is not None:
self._in.close()
if self._out is not None:
self._out.close()
if self._err is not None:
self._err.close()
self.logger.debug("Results of command: %s", self.cmd)
self.logger.debug(" OUT: %s", self.out)
self.logger.debug(" ERR: %s", self.err)
self.logger.debug(" RC: %s", self.rc)
def run(self, input_, timeout=None, get_pty=False):
with self.execute(
timeout=timeout, get_pty=get_pty
) as (in_, out, err):
if input_:
in_.write(input_)
in_.close()
self.out = out.read()
self.err = err.read()
return self.rc, self.out, self.err
def __init__(self, user, address, use_pkey=False):
"""
:param user: user
:type user: instance of User
:param address: ip / hostname
:type address: str
:param use_pkey: use ssh private key in the connection
:type use_pkey: bool
"""
super(RemoteExecutor, self).__init__(user)
self.address = address
self.use_pkey = use_pkey
def session(self, timeout=None):
"""
:param timeout: tcp timeout
:type timeout: float
:return: the session
:rtype: instance of RemoteExecutor.Session
"""
return RemoteExecutor.Session(self, timeout, self.use_pkey)
def run_cmd(self, cmd, input_=None, tcp_timeout=None, io_timeout=None):
"""
:param cmd: command
:type cmd: list
:param input_: input data
:type input_: str
:param tcp_timeout: tcp timeout
:type tcp_timeout: float
:param io_timeout: timeout for data operation (read/write)
:type io_timeout: float
:return: rc, out, err
:rtype: tuple (int, str, str)
"""
with self.session(tcp_timeout) as session:
return session.run_cmd(cmd, input_, io_timeout)
def is_connective(self, tcp_timeout=20.0):
"""
Check if address is connective via ssh
:param tcp_timeout: time to wait for response
:type tcp_timeout: float
:return: True if address is connective, False otherwise
:rtype: bool
"""
try:
self.logger.info(
"Check if address is connective via ssh in given timeout %s",
tcp_timeout
)
self.run_cmd(['true'], tcp_timeout=tcp_timeout)
return True
except (socket.timeout, socket.error) as e:
self.logger.debug("Socket error: %s", e)
except Exception as e:
self.logger.debug("SSH exception: %s", e)
return False
def wait_for_connectivity_state(
self, positive,
timeout=CONNECTIVITY_TIMEOUT,
sample_time=CONNECTIVITY_SAMPLE_TIME
):
"""
Wait until address will be connective or not via ssh
:param positive: wait for the positive or negative connective state
:type positive: bool
:param timeout: wait timeout
:type timeout: int
:param sample_time: sample the ssh each sample_time seconds
:type sample_time: int
:return: True, if positive and ssh is connective or
negative and ssh does not connective, otherwise False
:rtype: bool
"""
reachable = "unreachable" if positive else "reachable"
timeout_counter = 0
while self.is_connective() != positive:
if timeout_counter > timeout:
self.logger.error(
"Address %s is still %s via ssh, after %s seconds",
self.address, reachable, timeout
)
return False
time.sleep(sample_time)
timeout_counter += sample_time
return True
| gpl-2.0 | -7,273,027,268,052,574,000 | 33.962329 | 77 | 0.515526 | false |
lipro-yocto/git-repo | subcmds/cherry_pick.py | 1 | 3421 | # Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from command import Command
from git_command import GitCommand
CHANGE_ID_RE = re.compile(r'^\s*Change-Id: I([0-9a-f]{40})\s*$')
class CherryPick(Command):
common = True
helpSummary = "Cherry-pick a change."
helpUsage = """
%prog <sha1>
"""
helpDescription = """
'%prog' cherry-picks a change from one branch to another.
The change id will be updated, and a reference to the old
change id will be added.
"""
def _Options(self, p):
pass
def ValidateOptions(self, opt, args):
if len(args) != 1:
self.Usage()
def Execute(self, opt, args):
reference = args[0]
p = GitCommand(None,
['rev-parse', '--verify', reference],
capture_stdout=True,
capture_stderr=True)
if p.Wait() != 0:
print(p.stderr, file=sys.stderr)
sys.exit(1)
sha1 = p.stdout.strip()
p = GitCommand(None, ['cat-file', 'commit', sha1], capture_stdout=True)
if p.Wait() != 0:
print("error: Failed to retrieve old commit message", file=sys.stderr)
sys.exit(1)
old_msg = self._StripHeader(p.stdout)
p = GitCommand(None,
['cherry-pick', sha1],
capture_stdout=True,
capture_stderr=True)
status = p.Wait()
print(p.stdout, file=sys.stdout)
print(p.stderr, file=sys.stderr)
if status == 0:
# The cherry-pick was applied correctly. We just need to edit the
# commit message.
new_msg = self._Reformat(old_msg, sha1)
p = GitCommand(None, ['commit', '--amend', '-F', '-'],
provide_stdin=True,
capture_stdout=True,
capture_stderr=True)
p.stdin.write(new_msg)
p.stdin.close()
if p.Wait() != 0:
print("error: Failed to update commit message", file=sys.stderr)
sys.exit(1)
else:
print('NOTE: When committing (please see above) and editing the commit '
'message, please remove the old Change-Id-line and add:')
print(self._GetReference(sha1), file=sys.stderr)
print(file=sys.stderr)
def _IsChangeId(self, line):
return CHANGE_ID_RE.match(line)
def _GetReference(self, sha1):
return "(cherry picked from commit %s)" % sha1
def _StripHeader(self, commit_msg):
lines = commit_msg.splitlines()
return "\n".join(lines[lines.index("") + 1:])
def _Reformat(self, old_msg, sha1):
new_msg = []
for line in old_msg.splitlines():
if not self._IsChangeId(line):
new_msg.append(line)
# Add a blank line between the message and the change id/reference
try:
if new_msg[-1].strip() != "":
new_msg.append("")
except IndexError:
pass
new_msg.append(self._GetReference(sha1))
return "\n".join(new_msg)
| apache-2.0 | 5,867,796,251,983,264,000 | 28.747826 | 78 | 0.621163 | false |
CloudBoltSoftware/cloudbolt-forge | ui_extensions/multilevelapprovals/views.py | 1 | 14185 | from urllib.parse import urlparse
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.urls import reverse
from django.utils.translation import ugettext as _, ungettext
from accounts.models import UserProfile
from accounts.templatetags import account_tags
from cbhooks.exceptions import HookFailureException
from common.views import clear_cached_submenu
from costs.utils import (
is_rates_feature_enabled,
)
from cscv.models import CITConf, can_order_be_tested, CITTest
from orders.forms import DenyOrderForm
from orders.models import Order
from orders.templatetags.order_tags import order_pictograph, order_status_icon
from quota.exceptions import QuotaError
from servicecatalog.models import ServiceBlueprint
from utilities.decorators import json_view, dialog_view
from utilities.exceptions import (
InvalidCartException, InvalidConfigurationException,
CloudBoltException
)
from utilities.cb_http import django_sort_cols_from_datatable_request
from utilities.logger import ThreadLogger
from utilities.templatetags.helper_tags import link_or_label, how_long_ago
from utilities.views import access_denied
from .models import CustomOrder
from extensions.views import admin_extension
#@admin_extension(title='Multilevel Approvals Extension')
logger = ThreadLogger(__name__)
# Intentionally not protected at view level
@admin_extension(title='Multilevel Approvals Extension')
def order_list(request, message=""):
profile = request.get_user_profile()
# NOTE: order info will be sent via AJAX
return render(request, 'multilevelapprovals/templates/list.html', {
'pagetitle': _("Order List"),
'message': message,
'profile': profile,
'enable_rates_feature': is_rates_feature_enabled(),
})
# Intentionally not protected at view level
@json_view
def order_list_json(request, extra_context={}):
profile = request.get_user_profile()
# List of orders the user has permissions to view:
orders = Order.objects_for_profile(profile)
num_total_records = orders.count()
search = request.GET.get('sSearch')
if search:
orders = orders.search(search)
num_filtered_records = orders.count()
# Sorting: client passes column # which must be translated to model field
sort_cols = django_sort_cols_from_datatable_request(request, [
'id',
None,
'status',
'group',
# order by first & last which is how it's presented
['owner__user__first_name', 'owner__user__last_name'],
'create_date',
None, # Actions column is not sortable
])
orders = orders.order_by(*sort_cols)
# Pagination:
start = int(request.GET.get('iDisplayStart', None))
if start is not None:
end = int(start) + int(request.GET.get('iDisplayLength', 0))
orders = orders[start:end]
# Cache links to objects (since generating each requires a database hit):
_group_link_or_label_cache = {}
_owner_link_or_label_cache = {}
profiles_visible_to_this_profile = UserProfile.objects_for_profile(profile)
def cached_group_link_or_label(group):
try:
return _group_link_or_label_cache[group]
except KeyError:
rendered = link_or_label(group, profile)
_group_link_or_label_cache[group] = rendered
return rendered
def cached_owner_link_or_label(owner):
"""
Ensure that owner avatar and link-or-label is only constructed once
per page view.
"""
if not owner or not owner.user:
return ""
try:
rendered = _owner_link_or_label_cache[owner]
except KeyError:
rendered = account_tags.rich_gravatar(
owner,
size=20,
link=(owner in profiles_visible_to_this_profile),
full_name=True
)
_owner_link_or_label_cache[owner] = rendered
return rendered
actions_template = loader.get_template('multilevelapprovals/templates/actions.html')
rows = []
for order in orders:
# Render the actions column value as HTML:
actions_html = actions_template.render(context={
'order': order,
'profile': profile,
'is_owner': order.owner == profile,
'can_approve': profile.has_permission('order.approve', order),
'can_cancel': order.can_cancel(profile),
'can_save_to_catalog': order.can_save_to_catalog(profile),
}, request=request)
#approval_str = "" #SRM
#for dict in is_multilevel_approval(order):
# for key in dict.keys():
# strng = UserProfile.objects.get(id=dict[key]).user.username
# if not approval_str:
# approval_str = key + ":", strng
# else:
# approval_str += "<BR>" + key + ":", strng
row = [
# We know that the user has access to view this order already,
# so show URL instead of link_or_label:
'<a href="%s">%s</a>' % (order.get_absolute_url(),
order.nickname()),
order_pictograph(order),
order_status_icon(order),
cached_group_link_or_label(order.group),
cached_owner_link_or_label(order.owner),
how_long_ago(order.create_date),
actions_html,
]
rows.append(row)
return {
# unaltered from client-side value, but cast to int to avoid XSS
# http://datatables.net/usage/server-side
"sEcho": int(request.GET.get('sEcho', 1)),
"iTotalRecords": num_total_records,
"iTotalDisplayRecords": num_filtered_records,
"aaData": rows, # Orders for the current page only
}
def modify(request, order_id):
"""
POST requests from the order list and detail views go here.
"""
order = get_object_or_404(Order, pk=order_id)
profile = request.get_user_profile()
# action matches the button values in order_actions templatetag.
action = request.POST.get('action', [''])
logger.info(f'SRM: in modify: action == {action}')
if action in ['approve', 'deny']:
if not profile.has_permission('order.approve', order):
return access_denied(
request, _("You do not have permission to approve this item."))
msg = ""
redirect_url = request.META['HTTP_REFERER']
if action == 'submit':
if not profile.has_permission('order.submit', order):
return access_denied(
request, _("You do not have permission to submit this order."))
try:
order.submit()
msg += order.start_approval_process(request)
messages.info(request, msg)
except QuotaError as e: # could happen if order is auto-approved
messages.error(request, e)
except InvalidConfigurationException as e:
messages.error(request, e)
except HookFailureException as e:
messages.error(request, e)
redirect_url = reverse('order_detail', args=[order.id])
elif action == 'approve':
logger.info('SRM: in modify: action == approve (should work) -- b4 approve_my_grms')
logger.info(f'SRM: order = {order}')
logger.info(f'SRM: profile = {profile}')
if CustomOrder.is_multilevel_approval(order):
logger.info(f'SRM: is multilevel -- approving GRMs')
CustomOrder.approve_my_grms(order, profile)
if all(CustomOrder.is_multilevel_approval(order).values()):
logger.info(f'SRM: all values return true - can approve')
else:
logger.info(f'SRM: not all values return true - cant approve')
messages.info(request, "partial approval processed")
return HttpResponseRedirect(reverse('order_detail', args=[order.id]))
try:
jobs, extramsg = order.approve(profile)
if jobs:
# template tweaks the message based on where we are going next
redirect_parsed = urlparse(redirect_url)
msg = loader.render_to_string('orders/approved_msg.html', {
'order': order,
'autoapproved': False,
'num_jobs': len(jobs),
'extramsg': extramsg,
'request': request,
'redirect_url': redirect_parsed.path,
})
else:
msg = extramsg
messages.info(request, msg)
except QuotaError as e:
messages.error(request, e)
except CloudBoltException as e:
messages.warning(request, e)
except:
raise
elif action == 'cancel':
if not order.can_cancel(profile):
return access_denied(
request, _("You do not have permission to cancel this order."))
order.cancel()
if order.owner:
clear_cached_submenu(order.owner.user_id, 'orders')
msg = _("Order #{order_id} has been canceled.").format(order_id=order.id)
messages.info(request, msg)
elif action == 'clear':
order.group = None
order.blueprint = None
order.save()
for order_item in order.orderitem_set.all():
order_item.delete()
if order.owner:
clear_cached_submenu(order.owner.user_id, 'orders')
messages.success(request, _("Your current order has been cleared."))
elif action == 'remind':
logger.info(_("User requested order approval reminder for order {order_id}").format(order_id=order_id))
try:
msg = order.send_reminder(request)
logger.debug(msg)
messages.info(request, msg)
except InvalidConfigurationException as e:
messages.error(request, e)
elif action == 'duplicate':
# Global Viewers are a special case where objects_for_profile will
# return True since they can view all orders, but we don't want them to
# be able to do anything like duplicate it (unless they have additional
# permissions)
duplicable, reason = order.can_duplicate(profile)
if not duplicable:
if reason == 'permission':
return access_denied(
request, _("You do not have permission to duplicate this order."))
elif reason == 'group':
messages.error(request, _("Orders with no group cannot be duplicated."))
return HttpResponseRedirect(reverse('order_detail', args=[order.id]))
try:
profile = request.get_user_profile()
cart = profile.get_current_order()
cart = order.duplicate(cart)
items_duplicated = cart.items_duplicated
hostnames_updated = cart.hostnames_updated
msg = ungettext("Duplicated {num_items} order item under "
"<a href='{url}'>your current order</a>.",
"Duplicated {num_items} order items under "
"<a href='{url}'>your current order</a>.",
items_duplicated).format(num_items=items_duplicated,
url=cart.get_absolute_url())
if hostnames_updated:
uniq_msg = ungettext("{updated_count} order item was updated to "
"avoid creating identical hostnames.",
"{updated_count} order items were updated to "
"avoid creating identical hostnames.",
hostnames_updated).format(updated_count=hostnames_updated)
msg += uniq_msg
clear_cached_submenu(profile.user_id, 'orders')
messages.success(request, msg)
return HttpResponseRedirect(reverse('current_order'))
except InvalidCartException as e:
messages.error(request, e)
elif action == 'save_as_blueprint':
profile = request.get_user_profile()
if order.group and not profile.has_permission('blueprint.manage', order.group):
return access_denied(
request, _("You need to have blueprint management permission for "
"group '{group}' to create a blueprint from this order.").format(group=order.group))
bp = ServiceBlueprint.from_order(order)
clear_cached_submenu(profile.user_id, 'catalog')
messages.success(
request,
_("Successfully saved the <a href='{order_url}'>order</a> "
"as blueprint <a href='{blueprint_url}'>{blueprint_name}</a>").format(
order_url=order.get_absolute_url(),
blueprint_url=bp.get_absolute_url(),
blueprint_name=bp.name))
redirect_url = bp.get_absolute_url()
elif action == 'add_to_cit':
if can_order_be_tested(order):
cit_test = CITTest.objects.create(
name=order.name,
order=order,
cit_conf=CITConf.objects.first(),
expected_status=order.status,
)
messages.success(
request,
_('Created CIT test "{}". It will be automatically tested during '
'the text text run.'.format(link_or_label(cit_test, profile)))
)
else:
messages.error(request, "This order could not be added to CIT.")
return HttpResponseRedirect(redirect_url)
| apache-2.0 | -6,349,238,309,283,162,000 | 38.298295 | 111 | 0.582023 | false |
LeandroRoberto/sapl | sapl/comissoes/views.py | 1 | 3353 |
from django.core.urlresolvers import reverse
from django.db.models import F
from django.views.generic import ListView
from sapl.crud.base import RP_DETAIL, RP_LIST, Crud, CrudAux, MasterDetailCrud
from sapl.materia.models import MateriaLegislativa, Tramitacao
from .models import (CargoComissao, Comissao, Composicao, Participacao,
Periodo, TipoComissao)
def pegar_url_composicao(pk):
participacao = Participacao.objects.get(id=pk)
comp_pk = participacao.composicao.pk
url = reverse('sapl.comissoes:composicao_detail', kwargs={'pk': comp_pk})
return url
CargoCrud = CrudAux.build(CargoComissao, 'cargo_comissao')
PeriodoComposicaoCrud = CrudAux.build(Periodo, 'periodo_composicao_comissao')
TipoComissaoCrud = CrudAux.build(
TipoComissao, 'tipo_comissao', list_field_names=[
'sigla', 'nome', 'natureza', 'dispositivo_regimental'])
class ParticipacaoCrud(MasterDetailCrud):
model = Participacao
parent_field = 'composicao__comissao'
public = [RP_DETAIL, ]
ListView = None
is_m2m = True
link_return_to_parent_field = True
class BaseMixin(MasterDetailCrud.BaseMixin):
list_field_names = ['composicao', 'parlamentar', 'cargo']
class ComposicaoCrud(MasterDetailCrud):
model = Composicao
parent_field = 'comissao'
model_set = 'participacao_set'
public = [RP_LIST, RP_DETAIL, ]
class ListView(MasterDetailCrud.ListView):
template_name = "comissoes/composicao_list.html"
paginate_by = None
def take_composicao_pk(self):
try:
return int(self.request.GET['pk'])
except:
return 0
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
            context['composicao_pk'] = (
                context['composicao_list'].last().pk
                if self.take_composicao_pk() == 0
                else self.take_composicao_pk())
context['participacao_set'] = Participacao.objects.filter(
composicao__pk=context['composicao_pk']
).order_by('parlamentar')
return context
class ComissaoCrud(Crud):
model = Comissao
help_path = 'modulo_comissoes'
public = [RP_LIST, RP_DETAIL, ]
class BaseMixin(Crud.BaseMixin):
list_field_names = ['nome', 'sigla', 'tipo', 'data_criacao', 'ativa']
ordering = '-ativa', 'sigla'
class MateriasTramitacaoListView(ListView):
template_name = "comissoes/materias_em_tramitacao.html"
paginate_by = 10
def get_queryset(self):
# FIXME: Otimizar consulta
ts = Tramitacao.objects.order_by(
'materia', '-data_tramitacao', '-id').annotate(
comissao=F('unidade_tramitacao_destino__comissao')).distinct(
'materia').values_list('materia', 'comissao')
ts = list(filter(lambda x: x[1] == int(self.kwargs['pk']), ts))
ts = list(zip(*ts))
ts = ts[0] if ts else []
materias = MateriaLegislativa.objects.filter(
pk__in=ts).order_by('tipo', '-ano', '-numero')
return materias
def get_context_data(self, **kwargs):
context = super(
MateriasTramitacaoListView, self).get_context_data(**kwargs)
context['object'] = Comissao.objects.get(id=self.kwargs['pk'])
return context
| gpl-3.0 | 4,696,481,446,960,925,000 | 32.53 | 78 | 0.638831 | false |
mdovgialo/steam-vr-wheel | steam_vr_wheel/pyvjoy/_wrapper.py | 1 | 2789 | import os
import sys
from ctypes import *
dll_filename = "vJoyInterface.dll"
dll_path = os.path.dirname(__file__) + os.sep + dll_filename
try:
_vj = cdll.LoadLibrary(dll_path)
except OSError:
sys.exit("Unable to load vJoy SDK DLL. Ensure that %s is present" % dll_filename)
def vJoyEnabled():
"""Returns True if vJoy is installed and enabled"""
result = _vj.vJoyEnabled()
if result == 0:
raise vJoyNotEnabledException()
else:
return True
def DriverMatch():
"""Check if the version of vJoyInterface.dll and the vJoy Driver match"""
result = _vj.DriverMatch()
if result == 0:
raise vJoyDriverMismatch()
else:
return True
def GetVJDStatus(rID):
"""Get the status of a given vJoy Device"""
return _vj.GetVJDStatus(rID)
def AcquireVJD(rID):
"""Attempt to acquire a vJoy Device"""
result = _vj.AcquireVJD(rID)
if result == 0:
#Check status
status = GetVJDStatus(rID)
if status != VJD_STAT_FREE:
raise vJoyFailedToAcquireException("Cannot acquire vJoy Device because it is not in VJD_STAT_FREE")
else:
raise vJoyFailedToAcquireException()
else:
return True
def RelinquishVJD(rID):
"""Relinquish control of a vJoy Device"""
result = _vj.RelinquishVJD(rID)
if result == 0:
raise vJoyFailedToRelinquishException()
else:
return True
def SetBtn(state,rID,buttonID):
"""Sets the state of vJoy Button to on or off. SetBtn(state,rID,buttonID)"""
result = _vj.SetBtn(state,rID,buttonID)
if result == 0:
raise vJoyButtonError()
else:
return True
def SetDiscPov(PovValue, rID, PovID):
"""Write Value to a given discrete POV defined in the specified VDJ"""
if PovValue < -1 or PovValue > 3:
raise vJoyInvalidPovValueException()
if PovID < 1 or PovID > 4:
raise vJoyInvalidPovIDException
return _vj.SetDiscPov(PovValue,rID,PovID)
def SetContPov(PovValue, rID, PovID):
"""Write Value to a given continuous POV defined in the specified VDJ"""
if PovValue < -1 or PovValue > 35999:
raise vJoyInvalidPovValueException()
if PovID < 1 or PovID > 4:
raise vJoyInvalidPovIDException
return _vj.SetContPov(PovValue,rID,PovID)
def SetBtn(state,rID,buttonID):
"""Sets the state of vJoy Button to on or off. SetBtn(state,rID,buttonID)"""
result = _vj.SetBtn(state,rID,buttonID)
if result == 0:
raise vJoyButtonError()
else:
return True
def ResetVJD(rID):
"""Reset all axes and buttons to default for specified vJoy Device"""
return _vj.ResetVJD(rID)
def ResetButtons(rID):
"""Reset all buttons to default for specified vJoy Device"""
return _vj.ResetButtons(rID)
def ResetPovs(rID):
"""Reset all POV hats to default for specified vJoy Device"""
return _vj.ResetButtons(rID)
| mit | 8,956,066,866,461,069,000 | 21.241667 | 102 | 0.688777 | false |
mvaled/sentry | tests/sentry/attachments/test_redis.py | 1 | 2148 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
import zlib
from sentry.cache.redis import RedisClusterCache, RbCache
from sentry.testutils import TestCase
from sentry.utils.imports import import_string
class FakeClient(object):
def get(self, key):
if key == "c:1:foo:a":
return '[{"name":"foo.txt","content_type":"text/plain"}]'
elif key == "c:1:foo:a:0":
return zlib.compress(b"Hello World!")
class RbCluster(object):
def get_routing_client(self):
return CLIENT
CLIENT = FakeClient()
RB_CLUSTER = RbCluster()
class RedisClusterAttachmentTest(TestCase):
@mock.patch("sentry.utils.redis.redis_clusters.get", return_value=CLIENT)
def test_process_pending_one_batch(self, cluster_get):
attachment_cache = import_string("sentry.attachments.redis.RedisClusterAttachmentCache")()
cluster_get.assert_any_call("rc-short")
assert isinstance(attachment_cache.inner, RedisClusterCache)
assert attachment_cache.inner.client is CLIENT
rv = attachment_cache.get("foo")
assert len(rv) == 1
attachment = rv[0]
assert attachment.meta() == {
"type": "event.attachment",
"name": "foo.txt",
"content_type": "text/plain",
}
assert attachment.data == b"Hello World!"
class RbAttachmentTest(TestCase):
@mock.patch("sentry.cache.redis.get_cluster_from_options", return_value=(RB_CLUSTER, {}))
def test_process_pending_one_batch(self, cluster_get):
attachment_cache = import_string("sentry.attachments.redis.RbAttachmentCache")(hosts=[])
cluster_get.assert_any_call("SENTRY_CACHE_OPTIONS", {"hosts": []})
assert isinstance(attachment_cache.inner, RbCache)
assert attachment_cache.inner.client is CLIENT
rv = attachment_cache.get("foo")
assert len(rv) == 1
attachment = rv[0]
assert attachment.meta() == {
"type": "event.attachment",
"name": "foo.txt",
"content_type": "text/plain",
}
assert attachment.data == b"Hello World!"
| bsd-3-clause | -668,644,714,577,843,700 | 32.046154 | 98 | 0.637337 | false |
millken/simple-rtmp-server | trunk/research/community/server.py | 1 | 4633 | #!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2013-2014 winlin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
"""
the community is a default demo server for srs
"""
import sys
# reload sys model to enable the getdefaultencoding method.
reload(sys)
# set the default encoding to utf-8
# using exec to set the encoding, to avoid error in IDE.
exec("sys.setdefaultencoding('utf-8')")
assert sys.getdefaultencoding().lower() == "utf-8"
import os, json, time, datetime, cherrypy, threading
# simple log functions.
def trace(msg):
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print "[%s][trace] %s"%(date, msg)
# enable crossdomain access for js-client
# define the following method:
# def OPTIONS(self, *args, **kwargs)
# enable_crossdomain()
# invoke this method to enable js to request crossdomain.
def enable_crossdomain():
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
cherrypy.response.headers["Access-Control-Allow-Methods"] = "GET, POST, HEAD, PUT, DELETE"
# generate allow headers for crossdomain.
allow_headers = ["Cache-Control", "X-Proxy-Authorization", "X-Requested-With", "Content-Type"]
cherrypy.response.headers["Access-Control-Allow-Headers"] = ",".join(allow_headers)
# error codes definition
class Error:
# ok, success, completed.
success = 0
# HTTP RESTful path.
class Root(object):
exposed = True
def __init__(self):
self.api = Api()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success, "urls":{"api":"the api root"}})
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
# HTTP RESTful path.
class Api(object):
exposed = True
def __init__(self):
self.v1 = V1()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success,
"urls": {
"v1": "the api version 1.0"
}
});
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
# HTTP RESTful path. to access as:
# http://127.0.0.1:8085/api/v1/clients
class V1(object):
exposed = True
def __init__(self):
pass;
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
'''
main code start.
'''
# donot support use this module as library.
if __name__ != "__main__":
raise Exception("embed not support")
# check the user options
if len(sys.argv) <= 1:
print "SRS community server, Copyright (c) 2013-2014 winlin"
print "Usage: python %s <port>"%(sys.argv[0])
print " port: the port to listen at."
print "For example:"
print " python %s 1949"%(sys.argv[0])
print ""
print "See also: https://github.com/winlinvip/simple-rtmp-server"
sys.exit(1)
# parse port from user options.
port = int(sys.argv[1])
static_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "static-dir"))
trace("api server listen at port: %s, static_dir: %s"%(port, static_dir))
# cherrypy config.
conf = {
'global': {
'server.shutdown_timeout': 1,
'server.socket_host': '0.0.0.0',
'server.socket_port': port,
'tools.encode.on': True,
'tools.staticdir.on': True,
'tools.encode.encoding': "utf-8",
#'server.thread_pool': 2, # single thread server.
},
'/': {
'tools.staticdir.dir': static_dir,
'tools.staticdir.index': "index.html",
# for cherrypy RESTful api support
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
}
}
# start cherrypy web engine
trace("start cherrypy server")
root = Root()
cherrypy.quickstart(root, '/', conf)
| mit | -3,654,521,204,986,091,500 | 31.398601 | 98 | 0.668465 | false |
golya/FuzzLabs | engine/tests/steps/modules.py | 1 | 1961 | from behave import *
import os
import sys
import inspect
ROOT_DIR = os.path.dirname(
os.path.abspath(
inspect.getfile(inspect.currentframe()
)))
sys.path.append(ROOT_DIR + "/../../classes")
from ConfigurationHandler import ConfigurationHandler
from ModuleHandler import ModuleHandler
@given('we have root and config')
def step_impl(context):
assert os.path.isfile(ROOT_DIR + "/../../etc/engine.config")
context.root = ROOT_DIR + "/../../"
config_file = ROOT_DIR + "/../../etc/engine.config"
context.config_data = ConfigurationHandler(config_file).get()
@when('we load the modules')
def step_impl(context):
context.module_inst = ModuleHandler(context.root, context.config_data)
context.modules_list = context.module_inst.loaded_modules
@then('we get a list of modules')
def step_impl(context):
status = type(context.modules_list) == list
if status:
for module in context.modules_list:
if not module.get('instance') or \
not module.get('name') or \
not module.get('mtime') or \
not module.get('type'):
status = False
break
context.module_inst.unload_modules()
assert status
@given('we have modules loaded')
def step_impl(context):
assert os.path.isfile(ROOT_DIR + "/../../etc/engine.config")
root_dir = ROOT_DIR + "/../../"
config_file = ROOT_DIR + "/../../etc/engine.config"
config_data = ConfigurationHandler(config_file).get()
context.module_inst = ModuleHandler(root_dir, config_data)
context.modules_list = context.module_inst.loaded_modules
status = type(context.modules_list) == list
assert status
@when('we unload the modules')
def step_impl(context):
context.module_inst.unload_modules()
@then('we get an empty list')
def step_impl(context):
assert context.module_inst.loaded_modules == []
| gpl-2.0 | 9,094,677,830,503,925,000 | 29.640625 | 74 | 0.63743 | false |
digwanderlust/pants | tests/python/pants_test/tasks/test_cache_manager.py | 1 | 4690 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import shutil
import tempfile
from pants.base.build_invalidator import CacheKey, CacheKeyGenerator
from pants.base.cache_manager import InvalidationCacheManager, InvalidationCheck, VersionedTarget
from pants_test.base_test import BaseTest
class AppendingCacheKeyGenerator(CacheKeyGenerator):
"""Generates cache keys for versions of target sets."""
@staticmethod
def combine_cache_keys(cache_keys):
if len(cache_keys) == 1:
return cache_keys[0]
else:
sorted_cache_keys = sorted(cache_keys) # For commutativity.
combined_id = ','.join([cache_key.id for cache_key in sorted_cache_keys])
combined_hash = ','.join([cache_key.hash for cache_key in sorted_cache_keys])
combined_num_sources = reduce(lambda x, y: x + y,
[cache_key.num_sources for cache_key in sorted_cache_keys], 0)
return CacheKey(combined_id, combined_hash, combined_num_sources)
def key_for_target(self, target, sources=None, transitive=False, fingerprint_strategy=None):
return CacheKey(target.id, target.id, target.num_chunking_units)
def key_for(self, tid, sources):
return CacheKey(tid, tid, len(sources))
def print_vt(vt):
print('%d (%s) %s: [ %s ]' % (len(vt.targets), vt.cache_key, vt.valid, ', '.join(['%s(%s)' % (v.id, v.cache_key) for v in vt.versioned_targets])))
class InvalidationCacheManagerTest(BaseTest):
class TestInvalidationCacheManager(InvalidationCacheManager):
def __init__(self, tmpdir):
InvalidationCacheManager.__init__(self, AppendingCacheKeyGenerator(), tmpdir, True)
def setUp(self):
super(InvalidationCacheManagerTest, self).setUp()
self._dir = tempfile.mkdtemp()
self.cache_manager = InvalidationCacheManagerTest.TestInvalidationCacheManager(self._dir)
def tearDown(self):
shutil.rmtree(self._dir, ignore_errors=True)
super(InvalidationCacheManagerTest, self).tearDown()
def make_vts(self, target):
return VersionedTarget(self.cache_manager, target, target.id)
def test_partition(self):
# The default EmptyPayload chunking unit happens to be 1, so each of these Targets
# has a chunking unit contribution of 1
a = self.make_target(':a', dependencies=[])
b = self.make_target(':b', dependencies=[a])
c = self.make_target(':c', dependencies=[b])
d = self.make_target(':d', dependencies=[c, a])
e = self.make_target(':e', dependencies=[d])
targets = [a, b, c, d, e]
def print_partitions(partitions):
strs = []
for partition in partitions:
strs.append('(%s)' % ', '.join([t.id for t in partition.targets]))
print('[%s]' % ' '.join(strs))
# Verify basic data structure soundness.
all_vts = self.cache_manager.wrap_targets(targets)
invalid_vts = filter(lambda vt: not vt.valid, all_vts)
self.assertEquals(5, len(invalid_vts))
self.assertEquals(5, len(all_vts))
vts_targets = [vt.targets[0] for vt in all_vts]
self.assertEquals(set(targets), set(vts_targets))
# Test a simple partition.
ic = InvalidationCheck(all_vts, [], 3)
partitioned = ic.all_vts_partitioned
print_partitions(partitioned)
# Several correct partitionings are possible, but in all cases 4 1-source targets will be
# added to the first partition before it exceeds the limit of 3, and the final target will
# be in a partition by itself.
self.assertEquals(2, len(partitioned))
self.assertEquals(4, len(partitioned[0].targets))
self.assertEquals(1, len(partitioned[1].targets))
# Test partition with colors.
red = 'red'
blue = 'blue'
colors = {
a: blue,
b: red,
c: red,
d: red,
e: blue
}
# As a reference, we partition without colors.
ic = InvalidationCheck(all_vts, [], 2)
partitioned = ic.all_vts_partitioned
print_partitions(partitioned)
self.assertEquals(2, len(partitioned))
self.assertEquals(3, len(partitioned[0].targets))
self.assertEquals(2, len(partitioned[1].targets))
# Now apply color restrictions.
ic = InvalidationCheck(all_vts, [], 2, target_colors=colors)
partitioned = ic.all_vts_partitioned
print_partitions(partitioned)
self.assertEquals(3, len(partitioned))
self.assertEquals(1, len(partitioned[0].targets))
self.assertEquals(3, len(partitioned[1].targets))
self.assertEquals(1, len(partitioned[2].targets))
| apache-2.0 | -6,835,737,676,427,223,000 | 36.52 | 148 | 0.685501 | false |
amanzi/ats-dev | tools/utils/transect_data.py | 2 | 7741 | """Loads and/or plots 2D, topologically structured data on quadrilaterals using matplotlib.
"""
import sys,os
import numpy as np
import h5py
import mesh
import colors
def fullname(varname):
fullname = varname
if not '.cell.' in fullname:
fullname = fullname+'.cell.0'
return fullname
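# Example of the helper above: fullname('saturation_liquid') returns
# 'saturation_liquid.cell.0', while a name that already contains '.cell.'
# is returned unchanged.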
def transect_data(varnames, keys='all', directory=".", filename="visdump_data.h5",
mesh_filename="visdump_mesh.h5", coord_order=None, deformable=False, return_map=False):
"""Pulls simulation output into structured 2D arrays for transect-based, (i,j) indexing.
Input:
varnames | A list of variable names to pull, e.g.
| ['saturation_liquid', 'saturation_ice'], or a single variable
| name, e.g. 'saturation_liquid'
keys | Indices of timesteps to pull. Either an int (i.e. 0, -1, etc)
| for the kth timestep, or a list of ints, or 'all'.
directory | Directory of the run. Defaults to '.'
filename | Filename of the run. Defaults to 'visdump_data.h5'
mesh_filename | Filename of the mesh. Defaults to 'visdump_mesh.h5'
coord_order | Order of the transect coordinates. Defaults to ['x','z']. The
| mesh is sorted in this order.
deformable | Is the mesh deforming?
return_map | See return value below.
Output:
Output is an array of shape:
        ( len(varnames)+2, len(keys), n_cells_coord_order[0], n_cells_coord_order[1] )
data[0,0,:,:] is the coord_order[0] centroid
data[1,0,:,:] is the coord_order[1] centroid
data[i+2,k,:,:] is the ith varname data at the kth requested timestep, sorted in
the same way as the centroids.
Note that the data is re-ordered in INCREASING coordinate, i.e. bottom to top in z.
If return_map is True, then returns a tuple, (data, map) where
map is a (NX,NZ) array of integers specifying which global id
corresponds to the (i,j) cell. This is useful for mapping input
data back INTO the unstructured mesh.
Example usage:
Calculate and plot the thaw depth at step 5.
    // Pull saturation ice -- TD is where sat ice = 0.
    data = transect_data(['saturation_ice'], 5)
// x coordinate for plotting
x = data[0,0,:,0]
// for each column, find highest z where sat_ice > 0.
td_i = np.array([np.where(data[2,0,i,:] > 0.)[0][-1] for i in range(data.shape[2])])
// now that we have an index into the highest cell with ice, determine td as the
// mean of the highest cell with ice and the one above that. Note this assumes
// all columns have some thawing.
    td_z = np.array( [ (data[1,0,i,td_i[i]] + data[1,0,i,td_i[i]+1]) / 2.
                       for i in range(len(td_i)) ] )
plt.plot(x, td_z)
"""
if coord_order is None:
coord_order = ['x','z']
if type(varnames) is str:
varnames = [varnames,]
# get centroids
xyz = mesh.meshElemCentroids(mesh_filename, directory)
# round to avoid issues
xyz = np.round(xyz, decimals=5)
# get ordering of centroids
dtype = [(coord_order[0], float), (coord_order[1], float)]
num_order = []
for i in coord_order:
if i == 'x':
num_order.append(0)
elif i == 'y':
num_order.append(1)
elif i == 'z':
num_order.append(2)
xyz_sort_order = np.array([tuple([xyz[i,x] for x in num_order]) for i in range(len(xyz))], dtype=dtype)
xyz_sorting = xyz_sort_order.argsort(order=coord_order)
with h5py.File(os.path.join(directory,filename),'r') as dat:
keys_avail = dat[fullname(varnames[0])].keys()
keys_avail.sort(lambda a,b: int.__cmp__(int(a),int(b)))
if keys == 'all':
keys = keys_avail
elif type(keys) is str:
keys = [keys,]
elif type(keys) is int:
keys = [keys_avail[keys],]
elif type(keys) is slice:
keys = keys_avail[keys]
elif type(keys) is list:
if all(type(k) is int for k in keys):
keys = [keys_avail[k] for k in keys]
elif all(type(k) is str for k in keys):
pass
else:
raise RuntimeError("Keys requested cannot be processed -- should be 'all', int, or str key, or list of ints or strs.")
# get data
vals = np.zeros((len(varnames)+2, len(keys), len(xyz)), 'd')
for i,key in enumerate(keys):
if deformable:
xyz = mesh.meshElemCentroids(mesh_filename, directory)
vals[0,i,:] = xyz[xyz_sorting,num_order[0]]
vals[1,i,:] = xyz[xyz_sorting,num_order[1]]
for j,varname in enumerate(varnames):
vals[j+2,i,:] = dat[fullname(varname)][key][:,0][xyz_sorting]
# reshape the data
# determine nx
nx = len(set(vals[0,0,:]))
nz = vals.shape[2] / nx
if (nx * nz != vals.shape[2]):
raise RuntimeError("Assumption about first coordinate being cleanly binnable is falling apart -- ask Ethan to rethink this algorithm!")
shp = vals.shape
if not return_map:
return vals.reshape(shp[0], shp[1], nx, nz)
else:
return vals.reshape(shp[0], shp[1], nx, nz), xyz_sorting.reshape(nx, nz)
def plot(dataset, ax, cax=None, vmin=None, vmax=None, cmap="jet",
label=None, mesh_filename="visdump_mesh.h5", directory=".", y_coord=0.0,
linewidths=1):
"""Draws a dataset on an ax."""
import matplotlib.collections
from matplotlib import pyplot as plt
if vmin is None:
vmin = dataset.min()
if vmax is None:
vmax = dataset.max()
# get the mesh and collapse to 2D
etype, coords, conn = mesh.meshElemXYZ(filename=mesh_filename, directory=directory)
    if etype != 'HEX':
raise RuntimeError("Only works for Hexs")
coords2 = np.array([[coords[i][0::2] for i in c[1:] if abs(coords[i][1] - y_coord) < 1.e-8] for c in conn])
try:
assert coords2.shape[2] == 2
assert coords2.shape[1] == 4
except AssertionError:
print(coords2.shape)
for c in conn:
if len(c) != 9:
print c
raise RuntimeError("what is a conn?")
coords3 = np.array([coords[i][:] for i in c[1:] if abs(coords[i][1] - y_coord) < 1.e-8])
if coords3.shape[0] != 4:
print coords
raise RuntimeError("Unable to squash to 2D")
# reorder anti-clockwise
for i,c in enumerate(coords2):
centroid = c.mean(axis=0)
def angle(p1,p2):
a1 = np.arctan2((p1[1]-centroid[1]),(p1[0]-centroid[0]))
a2 = np.arctan2((p2[1]-centroid[1]),(p2[0]-centroid[0]))
if a1 < a2:
return -1
elif a2 < a1:
return 1
else:
return 0
c2 = np.array(sorted(c,angle))
coords2[i] = c2
polygons = matplotlib.collections.PolyCollection(coords2, edgecolor='k', cmap=cmap, linewidths=linewidths)
polygons.set_array(dataset)
polygons.set_clim(vmin,vmax)
ax.add_collection(polygons)
xmin = min(c[0] for c in coords.itervalues())
xmax = max(c[0] for c in coords.itervalues())
zmin = min(c[2] for c in coords.itervalues())
zmax = max(c[2] for c in coords.itervalues())
ax.set_xlim(xmin,xmax)
ax.set_ylim(zmin,zmax)
if cax is not None:
cb = plt.colorbar(polygons, cax=cax)
if label is not None:
cb.set_label(label)
return ((xmin,xmax),(zmin,zmax))
| bsd-3-clause | -6,002,022,548,617,249,000 | 35.687204 | 143 | 0.575119 | false |
gunan/tensorflow | tensorflow/python/keras/layers/preprocessing/benchmarks/categorical_encoding_benchmark.py | 1 | 3177 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras categorical_encoding preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.layers.preprocessing import categorical_encoding
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
v2_compat.enable_v2_behavior()
class BenchmarkLayer(benchmark.Benchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, output_mode, batch_size, sequence_length,
max_tokens):
input_t = keras.Input(shape=(sequence_length,), dtype=dtypes.int32)
layer = categorical_encoding.CategoricalEncoding(
max_tokens=max_tokens, output_mode=output_mode)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_tensor_slices(
random_ops.random_uniform([batch_size * 10, sequence_length],
minval=0,
maxval=max_tokens - 1,
dtype=dtypes.int32))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "categorical_encoding|batch_%s|seq_length_%s|%s_max_tokens" % (
batch_size, sequence_length, max_tokens)
self.report_benchmark(iters=num_repeats, wall_time=avg_time, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 256, 2048]:
for sequence_length in [10, 1000]:
for num_tokens in [100, 1000, 20000]:
self.run_dataset_implementation(
output_mode="count",
batch_size=batch,
sequence_length=sequence_length,
max_tokens=num_tokens)
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,262,048,413,489,094,000 | 35.517241 | 80 | 0.652188 | false |
mommermi/callhorizons | callhorizons/callhorizons.py | 1 | 60381 | """CALLHORIZONS - a Python interface to access JPL HORIZONS
ephemerides and orbital elements.
This module provides a convenient python interface to the JPL
HORIZONS system by directly accessing and parsing the HORIZONS
website. Ephemerides can be obtained through get_ephemerides,
orbital elements through get_elements. Function
export2pyephem provides an interface to the PyEphem module.
michael.mommert (at) nau.edu, latest version: v1.0.5, 2017-05-05.
This code is inspired by code created by Alex Hagen.
* v1.0.5: 15-epoch limit for set_discreteepochs removed
* v1.0.4: improved asteroid and comet name parsing
* v1.0.3: ObsEclLon and ObsEclLat added to get_ephemerides
* v1.0.2: Python 3.5 compatibility implemented
* v1.0.1: get_ephemerides fixed
* v1.0: bugfixes completed, planets/satellites accessible, too
* v0.9: first release
"""
from __future__ import (print_function, unicode_literals)
import re
import sys
import time
import numpy as np
import warnings
try:
# Python 3
import urllib.request as urllib
except ImportError:
# Python 2
import urllib2 as urllib
warnings.filterwarnings('once', category=DeprecationWarning)
warnings.warn(('CALLHORIZONS is not maintained anymore; please use '
'astroquery.jplhorizons instead (https://github.com/'
'astropy/astroquery)'),
DeprecationWarning)
def _char2int(char):
""" translate characters to integer values (upper and lower case)"""
if char.isdigit():
return int(float(char))
if char.isupper():
return int(char, 36)
else:
return 26 + int(char, 36)
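# Reference values for the helper above, as used when unpacking MPC packed
# designations further down: _char2int('5') == 5, _char2int('K') == 20
# (upper-case letters continue after 9), _char2int('a') == 36 (lower-case
# letters continue after 'Z').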
class query():
# constructor
def __init__(self, targetname, smallbody=True, cap=True, nofrag=False,
comet=False, asteroid=False):
"""Initialize query to Horizons
:param targetname: HORIZONS-readable target number, name, or designation
:param smallbody: boolean use ``smallbody=False`` if targetname is a
planet or spacecraft (optional, default: `True`);
also use `True` if the targetname is exact and
should be queried as is
:param cap: set to `True` to return the current apparition for
comet targets
:param nofrag: set to `True` to disable HORIZONS's comet
fragment search
:param comet: set to `True` if this is a comet (will override
automatic targetname parsing)
:param asteroid: set to `True` if this is an asteroid (will override
automatic targetname parsing)
:return: None
"""
self.targetname = str(targetname)
self.not_smallbody = not smallbody
self.cap = cap
self.nofrag = nofrag
self.comet = comet # is this object a comet?
self.asteroid = asteroid # is this object an asteroid?
self.start_epoch = None
self.stop_epoch = None
self.step_size = None
self.discreteepochs = None
self.url = None
self.data = None
assert not (
self.comet and self.asteroid), 'Only one of comet or asteroid can be `True`.'
return None
# small body designation parsing
def parse_comet(self):
"""Parse `targetname` as if it were a comet.
:return: (string or None, int or None, string or None);
The designation, number and prefix, and name of the comet as derived
from `self.targetname` are extracted into a tuple; each element that
does not exist is set to `None`. Parenthesis in `self.targetname`
will be ignored.
:example: the following table shows the result of the parsing:
+--------------------------------+--------------------------------+
|targetname |(desig, prefixnumber, name) |
+================================+================================+
|1P/Halley |(None, '1P', 'Halley') |
+--------------------------------+--------------------------------+
|3D/Biela |(None, '3D', 'Biela') |
+--------------------------------+--------------------------------+
|9P/Tempel 1 |(None, '9P', 'Tempel 1') |
+--------------------------------+--------------------------------+
|73P/Schwassmann Wachmann 3 C |(None, '73P', |
| |'Schwassmann Wachmann 3 C') |
+--------------------------------+--------------------------------+
|73P-C/Schwassmann Wachmann 3 C |(None, '73P-C', |
| |'Schwassmann Wachmann 3 C') |
+--------------------------------+--------------------------------+
|73P-BB |(None, '73P-BB', None) |
+--------------------------------+--------------------------------+
|322P |(None, '322P', None) |
+--------------------------------+--------------------------------+
        |X/1106 C1                      |('1106 C1', 'X', None)          |
+--------------------------------+--------------------------------+
|P/1994 N2 (McNaught-Hartley) |('1994 N2', 'P', |
| |'McNaught-Hartley') |
+--------------------------------+--------------------------------+
|P/2001 YX127 (LINEAR) |('2001 YX127', 'P', 'LINEAR') |
+--------------------------------+--------------------------------+
|C/-146 P1 |('-146 P1', 'C', None) |
+--------------------------------+--------------------------------+
|C/2001 A2-A (LINEAR) |('2001 A2-A', 'C', 'LINEAR') |
+--------------------------------+--------------------------------+
|C/2013 US10 |('2013 US10', 'C', None) |
+--------------------------------+--------------------------------+
|C/2015 V2 (Johnson) |('2015 V2', 'C', 'Johnson') |
+--------------------------------+--------------------------------+
|C/2016 KA (Catalina) |('2016 KA', 'C', 'Catalina') |
+--------------------------------+--------------------------------+
"""
import re
pat = ('^(([1-9]+[PDCXAI](-[A-Z]{1,2})?)|[PDCXAI]/)' + # prefix [0,1,2]
'|([-]?[0-9]{3,4}[ _][A-Z]{1,2}([0-9]{1,3})?(-[1-9A-Z]{0,2})?)' +
# designation [3,4]
('|(([A-Z][a-z]?[A-Z]*[a-z]*[ -]?[A-Z]?[1-9]*[a-z]*)' +
'( [1-9A-Z]{1,2})*)') # name [5,6]
)
m = re.findall(pat, self.targetname.strip())
# print(m)
prefixnumber = None
desig = None
name = None
if len(m) > 0:
for el in m:
# prefix/number
if len(el[0]) > 0:
prefixnumber = el[0].replace('/', '')
# designation
if len(el[3]) > 0:
desig = el[3].replace('_', ' ')
# name
if len(el[5]) > 0:
if len(el[5]) > 1:
name = el[5]
return (desig, prefixnumber, name)
def parse_asteroid(self):
"""Parse `targetname` as if it were a asteroid.
:return: (string or None, int or None, string or None);
The designation, number, and name of the asteroid as derived from
`self.targetname` are extracted into a tuple; each element that
does not exist is set to `None`. Parenthesis in `self.targetname`
will be ignored. Packed designations and numbers are unpacked.
:example: the following table shows the result of the parsing:
+--------------------------------+---------------------------------+
|targetname |(desig, number, name) |
+================================+=================================+
|1 |(None, 1, None) |
+--------------------------------+---------------------------------+
|2 Pallas |(None, 2, Pallas) |
+--------------------------------+---------------------------------+
|\(2001\) Einstein |(None, 2001, Einstein) |
+--------------------------------+---------------------------------+
|1714 Sy |(None, 1714, Sy) |
+--------------------------------+---------------------------------+
|2014 MU69 |(2014 MU69, None, None) |
+--------------------------------+---------------------------------+
|(228195) 6675 P-L |(6675 P-L, 228195, None) |
+--------------------------------+---------------------------------+
|4101 T-3 |(4101 T-3, None, None) |
+--------------------------------+---------------------------------+
|4015 Wilson-Harrington (1979 VA)|(1979 VA, 4015, Wilson-Harrington|
+--------------------------------+---------------------------------+
|J95X00A |(1995 XA, None, None) |
+--------------------------------+---------------------------------+
|K07Tf8A |(2007 TA418, None, None) |
+--------------------------------+---------------------------------+
|G3693 |(None, 163693, None) |
+--------------------------------+---------------------------------+
|2017 U1 |(None, None, None) |
+--------------------------------+---------------------------------+
"""
pat = ('(([1-2][0-9]{0,3}[ _][A-Z]{2}[0-9]{0,3})' # designation [0,1]
'|([1-9][0-9]{3}[ _](P-L|T-[1-3])))' # Palomar-Leiden [0,2,3]
'|([IJKL][0-9]{2}[A-Z][0-9a-z][0-9][A-Z])' # packed desig [4]
'|([A-Za-z][0-9]{4})' # packed number [5]
'|([A-Z][A-Z]*[a-z][a-z]*[^0-9]*'
'[ -]?[A-Z]?[a-z]*[^0-9]*)' # name [6]
'|([1-9][0-9]*(\b|$))') # number [7,8]
# regex patterns that will be ignored as they might cause
# confusion
non_pat = ('([1-2][0-9]{0,3}[ _][A-Z][0-9]*(\b|$))') # comet desig
if sys.version_info > (3, 0):
raw = self.targetname.translate(str.maketrans('()', ' ')).strip()
else:
import string
raw = self.targetname.translate(string.maketrans('()',
' ')).strip()
# reject non_pat patterns
non_m = re.findall(non_pat, raw)
# print('reject', raw, non_m)
if len(non_m) > 0:
for ps in non_m:
for p in ps:
if p == '':
continue
raw = raw[:raw.find(p)] + raw[raw.find(p)+len(p):]
# match target patterns
m = re.findall(pat, raw)
# print(raw, m)
desig = None
number = None
name = None
if len(m) > 0:
for el in m:
# designation
if len(el[0]) > 0:
desig = el[0]
# packed designation (unpack here)
elif len(el[4]) > 0:
ident = el[4]
# old designation style, e.g.: 1989AB
if (len(ident.strip()) < 7 and ident[:4].isdigit() and
ident[4:6].isalpha()):
desig = ident[:4]+' '+ident[4:6]
# Palomar Survey
elif ident.find("PLS") == 0:
desig = ident[3:] + " P-L"
# Trojan Surveys
elif ident.find("T1S") == 0:
desig = ident[3:] + " T-1"
elif ident.find("T2S") == 0:
desig = ident[3:] + " T-2"
elif ident.find("T3S") == 0:
desig = ident[3:] + " T-3"
# insert blank in designations
elif (ident[0:4].isdigit() and ident[4:6].isalpha() and
ident[4] != ' '):
desig = ident[:4]+" "+ident[4:]
# MPC packed 7-digit designation
elif (ident[0].isalpha() and ident[1:3].isdigit() and
ident[-1].isalpha() and ident[-2].isdigit()):
yr = str(_char2int(ident[0]))+ident[1:3]
let = ident[3]+ident[-1]
num = str(_char2int(ident[4]))+ident[5]
num = num.lstrip("0")
desig = yr+' '+let+num
# nothing to do
else:
desig = ident
# packed number (unpack here)
elif len(el[5]) > 0:
ident = el[5]
number = ident = int(str(_char2int(ident[0]))+ident[1:])
# number
elif len(el[7]) > 0:
if sys.version_info > (3, 0):
number = int(float(el[7].translate(str.maketrans('()',
' '))))
else:
import string
number = int(float(el[7].translate(string.maketrans('()',
' '))))
# name (strip here)
elif len(el[6]) > 0:
if len(el[6].strip()) > 1:
name = el[6].strip()
return (desig, number, name)
def isorbit_record(self):
"""`True` if `targetname` appears to be a comet orbit record number.
NAIF record numbers are 6 digits, begin with a '9' and can
change at any time.
"""
import re
test = re.match('^9[0-9]{5}$', self.targetname.strip()) is not None
return test
def iscomet(self):
"""`True` if `targetname` appears to be a comet. """
# treat this object as comet if there is a prefix/number
if self.comet is not None:
return self.comet
elif self.asteroid is not None:
return not self.asteroid
else:
return (self.parse_comet()[0] is not None or
self.parse_comet()[1] is not None)
def isasteroid(self):
"""`True` if `targetname` appears to be an asteroid."""
if self.asteroid is not None:
return self.asteroid
elif self.comet is not None:
return not self.comet
else:
return any(self.parse_asteroid()) is not None
# set epochs
def set_epochrange(self, start_epoch, stop_epoch, step_size):
"""Set a range of epochs, all times are UT
:param start_epoch: str;
start epoch of the format 'YYYY-MM-DD [HH-MM-SS]'
:param stop_epoch: str;
final epoch of the format 'YYYY-MM-DD [HH-MM-SS]'
:param step_size: str;
epoch step size, e.g., '1d' for 1 day, '10m' for 10 minutes...
:return: None
:example: >>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-26', '2016-10-25', '1d')
Note that dates are mandatory; if no time is given, midnight is assumed.
"""
self.start_epoch = start_epoch
self.stop_epoch = stop_epoch
self.step_size = step_size
return None
def set_discreteepochs(self, discreteepochs):
"""Set a list of discrete epochs, epochs have to be given as Julian
Dates
:param discreteepochs: array_like
list or 1D array of floats or strings
:return: None
:example: >>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_discreteepochs([2457446.177083, 2457446.182343])
"""
if not isinstance(discreteepochs, (list, np.ndarray)):
discreteepochs = [discreteepochs]
self.discreteepochs = list(discreteepochs)
# data access functions
@property
def fields(self):
"""returns list of available properties for all epochs"""
try:
return self.data.dtype.names
except AttributeError:
return []
def __len__(self):
"""returns total number of epochs that have been queried"""
try:
# Cast to int because a long is returned from shape on Windows.
return int(self.data.shape[0])
except AttributeError:
return 0
@property
def dates(self):
"""returns list of epochs that have been queried (format 'YYYY-MM-DD HH-MM-SS')"""
try:
return self.data['datetime']
except:
return []
@property
def query(self):
"""returns URL that has been used in calling HORIZONS"""
try:
return self.url
except:
return []
@property
def dates_jd(self):
"""returns list of epochs that have been queried (Julian Dates)"""
try:
return self.data['datetime_jd']
except:
return []
def __repr__(self):
"""returns brief query information"""
return "<callhorizons.query object: %s>" % self.targetname
def __str__(self):
"""returns information on the current query as string"""
output = "targetname: %s\n" % self.targetname
if self.discreteepochs is not None:
output += "discrete epochs: %s\n" % \
" ".join([str(epoch) for epoch in self.discreteepochs])
if (self.start_epoch is not None and self.stop_epoch is not None and
self.step_size is not None):
output += "epoch range from %s to %s in steps of %s\n" % \
(self.start_epoch, self.stop_epoch, self.step_size)
output += "%d data sets queried with %d different fields" % \
(len(self), len(self.fields))
return output
def __getitem__(self, key):
"""provides access to query data
:param key: str/int;
epoch index or property key
:return: query data according to key
"""
# check if data exist
if self.data is None or len(self.data) == 0:
print('CALLHORIZONS ERROR: run get_ephemerides or get_elements',
'first')
return None
return self.data[key]
# call functions
def get_ephemerides(self, observatory_code,
airmass_lessthan=99,
solar_elongation=(0, 180),
skip_daylight=False):
"""Call JPL HORIZONS website to obtain ephemerides based on the
provided targetname, epochs, and observatory_code. For a list
of valid observatory codes, refer to
http://minorplanetcenter.net/iau/lists/ObsCodesF.html
:param observatory_code: str/int;
observer's location code according to Minor Planet Center
:param airmass_lessthan: float;
maximum airmass (optional, default: 99)
:param solar_elongation: tuple;
permissible solar elongation range (optional, deg)
:param skip_daylight: boolean;
crop daylight epoch during query (optional)
:result: int; number of epochs queried
:example: >>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
>>> print (ceres.get_ephemerides(568), 'epochs queried')
The queried properties and their definitions are:
+------------------+-----------------------------------------------+
| Property | Definition |
+==================+===============================================+
| targetname | official number, name, designation [string] |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| datetime | epoch date and time (str, YYYY-MM-DD HH:MM:SS)|
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float) |
+------------------+-----------------------------------------------+
| solar_presence | information on Sun's presence (str) |
+------------------+-----------------------------------------------+
| lunar_presence | information on Moon's presence (str) |
+------------------+-----------------------------------------------+
| RA | target RA (float, J2000.0) |
+------------------+-----------------------------------------------+
| DEC | target DEC (float, J2000.0) |
+------------------+-----------------------------------------------+
| RA_rate | target rate RA (float, arcsec/s) |
+------------------+-----------------------------------------------+
| DEC_rate | target RA (float, arcsec/s, includes cos(DEC))|
+------------------+-----------------------------------------------+
| AZ | Azimuth meas East(90) of North(0) (float, deg)|
+------------------+-----------------------------------------------+
| EL | Elevation (float, deg) |
+------------------+-----------------------------------------------+
| airmass | target optical airmass (float) |
+------------------+-----------------------------------------------+
| magextinct | V-mag extinction due airmass (float, mag) |
+------------------+-----------------------------------------------+
| V | V magnitude (comets: total mag) (float, mag) |
+------------------+-----------------------------------------------+
| illumination | fraction of illuminated disk (float) |
+------------------+-----------------------------------------------+
| EclLon | heliocentr. ecl. long. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| EclLat | heliocentr. ecl. lat. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| ObsEclLon | obscentr. ecl. long. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| ObsEclLat | obscentr. ecl. lat. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| r | heliocentric distance (float, au) |
+------------------+-----------------------------------------------+
| r_rate | heliocentric radial rate (float, km/s) |
+------------------+-----------------------------------------------+
| delta | distance from the observer (float, au) |
+------------------+-----------------------------------------------+
| delta_rate | obs-centric radial rate (float, km/s) |
+------------------+-----------------------------------------------+
| lighttime | one-way light time (float, s) |
+------------------+-----------------------------------------------+
| elong | solar elongation (float, deg) |
+------------------+-----------------------------------------------+
| elongFlag | app. position relative to Sun (str) |
+------------------+-----------------------------------------------+
| alpha | solar phase angle (float, deg) |
+------------------+-----------------------------------------------+
| sunTargetPA | PA of Sun->target vector (float, deg, EoN) |
+------------------+-----------------------------------------------+
| velocityPA | PA of velocity vector (float, deg, EoN) |
+------------------+-----------------------------------------------+
| GlxLon | galactic longitude (float, deg) |
+------------------+-----------------------------------------------+
| GlxLat | galactic latitude (float, deg) |
+------------------+-----------------------------------------------+
| RA_3sigma | 3sigma pos. unc. in RA (float, arcsec) |
+------------------+-----------------------------------------------+
| DEC_3sigma | 3sigma pos. unc. in DEC (float, arcsec) |
+------------------+-----------------------------------------------+
"""
# queried fields (see HORIZONS website for details)
# if fields are added here, also update the field identification below
quantities = '1,3,4,8,9,10,18,19,20,21,23,24,27,31,33,36'
# encode objectname for use in URL
objectname = urllib.quote(self.targetname.encode("utf8"))
# construct URL for HORIZONS query
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l" \
+ "&TABLE_TYPE='OBSERVER'" \
+ "&QUANTITIES='" + str(quantities) + "'" \
+ "&CSV_FORMAT='YES'" \
+ "&ANG_FORMAT='DEG'" \
+ "&CAL_FORMAT='BOTH'" \
+ "&SOLAR_ELONG='" + str(solar_elongation[0]) + "," \
+ str(solar_elongation[1]) + "'" \
+ "&CENTER='"+str(observatory_code)+"'"
if self.not_smallbody:
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "'"
elif self.cap and self.comet:
for ident in self.parse_comet():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='DES=" + \
urllib.quote(ident.encode("utf8")) + "%3B" + \
("CAP'" if self.cap else "'")
elif self.isorbit_record():
# Comet orbit record. Do not use DES, CAP. This test must
# occur before asteroid test.
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "%3B'"
elif self.isasteroid() and not self.comet:
# for asteroids, use 'DES="designation";'
for ident in self.parse_asteroid():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='" + \
urllib.quote(str(ident).encode("utf8")) + "%3B'"
elif self.iscomet() and not self.asteroid:
# for comets, potentially append the current apparition
# (CAP) parameter, or the fragmentation flag (NOFRAG)
for ident in self.parse_comet():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='DES=" + \
urllib.quote(ident.encode("utf8")) + "%3B" + \
("NOFRAG%3B" if self.nofrag else "") + \
("CAP'" if self.cap else "'")
# elif (not self.targetname.replace(' ', '').isalpha() and not
# self.targetname.isdigit() and not
# self.targetname.islower() and not
# self.targetname.isupper()):
# # lower case + upper case + numbers = pot. case sensitive designation
# url += "&COMMAND='DES=" + \
# urllib.quote(self.targetname.encode("utf8")) + "%3B'"
else:
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "%3B'"
if self.discreteepochs is not None:
url += "&TLIST="
for date in self.discreteepochs:
url += "'" + str(date) + "'"
elif (self.start_epoch is not None and self.stop_epoch is not None and
self.step_size is not None):
url += "&START_TIME='" \
+ urllib.quote(self.start_epoch.encode("utf8")) + "'" \
+ "&STOP_TIME='" \
+ urllib.quote(self.stop_epoch.encode("utf8")) + "'" \
+ "&STEP_SIZE='" + str(self.step_size) + "'"
else:
raise IOError('no epoch information given')
if airmass_lessthan < 99:
url += "&AIRMASS='" + str(airmass_lessthan) + "'"
if skip_daylight:
url += "&SKIP_DAYLT='YES'"
else:
url += "&SKIP_DAYLT='NO'"
self.url = url
# print (url)
# call HORIZONS
i = 0 # count number of connection tries
while True:
try:
src = urllib.urlopen(url).readlines()
break
except urllib.URLError:
time.sleep(0.1)
# in case the HORIZONS website is blocked (due to another query)
# wait 0.1 second and try again
i += 1
if i > 50:
return 0 # website could not be reached
# disseminate website source code
# identify header line and extract data block (ephemerides data)
# also extract targetname, absolute mag. (H), and slope parameter (G)
headerline = []
datablock = []
in_datablock = False
H, G = np.nan, np.nan
for idx, line in enumerate(src):
line = line.decode('UTF-8')
if "Date__(UT)__HR:MN" in line:
headerline = line.split(',')
if "$$EOE\n" in line:
in_datablock = False
if in_datablock:
datablock.append(line)
if "$$SOE\n" in line:
in_datablock = True
if "Target body name" in line:
targetname = line[18:50].strip()
if ("rotational period in hours)" in
src[idx].decode('UTF-8')):
HGline = src[idx+2].decode('UTF-8').split('=')
if 'B-V' in HGline[2] and 'G' in HGline[1]:
try:
H = float(HGline[1].rstrip('G'))
except ValueError:
pass
try:
G = float(HGline[2].rstrip('B-V'))
except ValueError:
pass
if ("Multiple major-bodies match string" in
src[idx].decode('UTF-8') or
("Matching small-bodies" in src[idx].decode('UTF-8') and not
"No matches found" in src[idx+1].decode('UTF-8'))):
raise ValueError('Ambiguous target name; check URL: %s' %
url)
if ("Matching small-bodies" in src[idx].decode('UTF-8') and
"No matches found" in src[idx+1].decode('UTF-8')):
raise ValueError('Unknown target; check URL: %s' % url)
# field identification for each line
ephemerides = []
for line in datablock:
line = line.split(',')
# ignore line that don't hold any data
if len(line) < len(quantities.split(',')):
continue
this_eph = []
fieldnames = []
datatypes = []
# create a dictionary for each date (each line)
for idx, item in enumerate(headerline):
if ('Date__(UT)__HR:MN' in item):
this_eph.append(line[idx].strip())
fieldnames.append('datetime')
datatypes.append(object)
if ('Date_________JDUT' in item):
this_eph.append(np.float64(line[idx]))
fieldnames.append('datetime_jd')
datatypes.append(np.float64)
# read out and convert solar presence
try:
this_eph.append({'*': 'daylight', 'C': 'civil twilight',
'N': 'nautical twilight',
'A': 'astronomical twilight',
' ': 'dark',
't': 'transiting'}[line[idx+1]])
except KeyError:
this_eph.append('n.a.')
fieldnames.append('solar_presence')
datatypes.append(object)
# read out and convert lunar presence
try:
this_eph.append({'m': 'moonlight',
' ': 'dark'}[line[idx+2]])
except KeyError:
this_eph.append('n.a.')
fieldnames.append('lunar_presence')
datatypes.append(object)
if (item.find('R.A._(ICRF/J2000.0)') > -1):
this_eph.append(np.float64(line[idx]))
fieldnames.append('RA')
datatypes.append(np.float64)
if (item.find('DEC_(ICRF/J2000.0)') > -1):
this_eph.append(np.float64(line[idx]))
fieldnames.append('DEC')
datatypes.append(np.float64)
if (item.find('dRA*cosD') > -1):
try:
this_eph.append(np.float64(line[idx])/3600.) # "/s
except ValueError:
this_eph.append(np.nan)
fieldnames.append('RA_rate')
datatypes.append(np.float64)
if (item.find('d(DEC)/dt') > -1):
try:
this_eph.append(np.float64(line[idx])/3600.) # "/s
except ValueError:
this_eph.append(np.nan)
fieldnames.append('DEC_rate')
datatypes.append(np.float64)
if (item.find('Azi_(a-app)') > -1):
try: # if AZ not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
fieldnames.append('AZ')
datatypes.append(np.float64)
except ValueError:
pass
if (item.find('Elev_(a-app)') > -1):
try: # if EL not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
fieldnames.append('EL')
datatypes.append(np.float64)
except ValueError:
pass
if (item.find('a-mass') > -1):
try: # if airmass not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('airmass')
datatypes.append(np.float64)
if (item.find('mag_ex') > -1):
try: # if mag_ex not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('magextinct')
datatypes.append(np.float64)
if (item.find('APmag') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('V')
datatypes.append(np.float64)
if (item.find('Illu%') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('illumination')
datatypes.append(np.float64)
if (item.find('hEcl-Lon') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('EclLon')
datatypes.append(np.float64)
if (item.find('hEcl-Lat') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('EclLat')
datatypes.append(np.float64)
if (item.find('ObsEcLon') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('ObsEclLon')
datatypes.append(np.float64)
if (item.find('ObsEcLat') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('ObsEclLat')
datatypes.append(np.float64)
if (item.find(' r') > -1) and \
(headerline[idx+1].find("rdot") > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('r')
datatypes.append(np.float64)
if (item.find('rdot') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('r_rate')
datatypes.append(np.float64)
if (item.find('delta') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('delta')
datatypes.append(np.float64)
if (item.find('deldot') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('delta_rate')
datatypes.append(np.float64)
if (item.find('1-way_LT') > -1):
try:
this_eph.append(np.float64(line[idx])*60.) # seconds
except ValueError:
this_eph.append(np.nan)
fieldnames.append('lighttime')
datatypes.append(np.float64)
if (item.find('S-O-T') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('elong')
datatypes.append(np.float64)
# in the case of space telescopes, '/r S-T-O' is used;
# ground-based telescopes have both parameters in separate
# columns
if (item.find('/r S-T-O') > -1):
this_eph.append({'/L': 'leading', '/T': 'trailing'}
[line[idx].split()[0]])
fieldnames.append('elongFlag')
datatypes.append(object)
try:
this_eph.append(np.float64(line[idx].split()[1]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('alpha')
datatypes.append(np.float64)
elif (item.find('S-T-O') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('alpha')
datatypes.append(np.float64)
elif (item.find('/r') > -1):
this_eph.append({'/L': 'leading', '/T': 'trailing',
'/?': 'not defined'}
[line[idx]])
fieldnames.append('elongFlag')
datatypes.append(object)
if (item.find('PsAng') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('sunTargetPA')
datatypes.append(np.float64)
if (item.find('PsAMV') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('velocityPA')
datatypes.append(np.float64)
if (item.find('GlxLon') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('GlxLon')
datatypes.append(np.float64)
if (item.find('GlxLat') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('GlxLat')
datatypes.append(np.float64)
if (item.find('RA_3sigma') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('RA_3sigma')
datatypes.append(np.float64)
if (item.find('DEC_3sigma') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('DEC_3sigma')
datatypes.append(np.float64)
# in the case of a comet, use total mag for V
if (item.find('T-mag') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('V')
datatypes.append(np.float64)
# append target name
this_eph.append(targetname)
fieldnames.append('targetname')
datatypes.append(object)
# append H
this_eph.append(H)
fieldnames.append('H')
datatypes.append(np.float64)
# append G
this_eph.append(G)
fieldnames.append('G')
datatypes.append(np.float64)
if len(this_eph) > 0:
ephemerides.append(tuple(this_eph))
if len(ephemerides) == 0:
return 0
# combine ephemerides with column names and data types into ndarray
assert len(ephemerides[0]) == len(fieldnames) == len(datatypes)
self.data = np.array(ephemerides,
dtype=[(str(fieldnames[i]), datatypes[i]) for i
in range(len(fieldnames))])
return len(self)
def get_elements(self, center='500@10', asteroid=False, comet=False):
"""Call JPL HORIZONS website to obtain orbital elements based on the
provided targetname, epochs, and center code. For valid center
codes, please refer to http://ssd.jpl.nasa.gov/horizons.cgi
:param center: str;
center body (default: 500@10 = Sun)
:result: int; number of epochs queried
:example: >>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
>>> print (ceres.get_elements(), 'epochs queried')
The queried properties and their definitions are:
+------------------+-----------------------------------------------+
| Property | Definition |
+==================+===============================================+
| targetname | official number, name, designation [string] |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float) |
+------------------+-----------------------------------------------+
| e | eccentricity (float) |
+------------------+-----------------------------------------------+
| p | periapsis distance (float, au) |
+------------------+-----------------------------------------------+
| a | semi-major axis (float, au) |
+------------------+-----------------------------------------------+
| incl | inclination (float, deg) |
+------------------+-----------------------------------------------+
| node | longitude of Asc. Node (float, deg) |
+------------------+-----------------------------------------------+
| argper | argument of the perifocus (float, deg) |
+------------------+-----------------------------------------------+
| Tp | time of periapsis (float, Julian Date) |
+------------------+-----------------------------------------------+
| meananomaly | mean anomaly (float, deg) |
+------------------+-----------------------------------------------+
| trueanomaly | true anomaly (float, deg) |
+------------------+-----------------------------------------------+
| period | orbital period (float, Earth yr) |
+------------------+-----------------------------------------------+
| Q | apoapsis distance (float, au) |
+------------------+-----------------------------------------------+
"""
# encode objectname for use in URL
objectname = urllib.quote(self.targetname.encode("utf8"))
# call Horizons website and extract data
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l" \
+ "&TABLE_TYPE='ELEMENTS'" \
+ "&CSV_FORMAT='YES'" \
+ "&CENTER='" + str(center) + "'" \
+ "&OUT_UNITS='AU-D'" \
+ "&REF_PLANE='ECLIPTIC'" \
+ "REF_SYSTEM='J2000'" \
+ "&TP_TYPE='ABSOLUTE'" \
+ "&ELEM_LABELS='YES'" \
+ "CSV_FORMAT='YES'" \
+ "&OBJ_DATA='YES'"
# check if self.targetname is a designation
# lower case + upper case + numbers = pot. case sensitive designation
if self.not_smallbody:
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "'"
elif self.isorbit_record():
# Comet orbit record. Do not use DES, CAP. This test must
# occur before asteroid test.
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "%3B'"
elif self.isasteroid() and not self.comet:
# for asteroids, use 'DES="designation";'
for ident in self.parse_asteroid():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='" + \
urllib.quote(str(ident).encode("utf8")) + "%3B'"
elif self.iscomet() and not self.asteroid:
# for comets, potentially append the current apparition
# (CAP) parameter, or the fragmentation flag (NOFRAG)
for ident in self.parse_comet():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='DES=" + \
urllib.quote(ident.encode("utf8")) + "%3B" + \
("NOFRAG%3B" if self.nofrag else "") + \
("CAP'" if self.cap else "'")
# elif (not self.targetname.replace(' ', '').isalpha() and not
# self.targetname.isdigit() and not
# self.targetname.islower() and not
# self.targetname.isupper()):
# url += "&COMMAND='DES=" + str(objectname) + "%3B'"
else:
url += "&COMMAND='" + str(objectname) + "%3B'"
if self.discreteepochs is not None:
url += "&TLIST="
for date in self.discreteepochs:
url += "'" + str(date) + "'"
elif (self.start_epoch is not None and self.stop_epoch is not None and
self.step_size is not None):
url += "&START_TIME='" \
+ urllib.quote(self.start_epoch.encode("utf8")) + "'" \
+ "&STOP_TIME='" \
+ urllib.quote(self.stop_epoch.encode("utf8")) + "'" \
+ "&STEP_SIZE='" + str(self.step_size) + "'"
else:
raise IOError('no epoch information given')
self.url = url
i = 0 # count number of connection tries
while True:
try:
src = urllib.urlopen(url).readlines()
break
except urllib.URLError:
time.sleep(0.1)
# in case the HORIZONS website is blocked (due to another query)
                # wait 0.1 second and try again
i += 1
if i > 50:
return 0 # website could not be reached
# disseminate website source code
# identify header line and extract data block (elements data)
# also extract targetname, abs. magnitude (H), and slope parameter (G)
headerline = []
datablock = []
in_datablock = False
H, G = np.nan, np.nan
for idx, line in enumerate(src):
line = line.decode('UTF-8')
if 'JDTDB,' in line:
headerline = line.split(',')
if "$$EOE\n" in line:
in_datablock = False
if in_datablock:
datablock.append(line)
if "$$SOE\n" in line:
in_datablock = True
if "Target body name" in line:
targetname = line[18:50].strip()
if "rotational period in hours)" in src[idx].decode('UTF-8'):
HGline = src[idx+2].decode('UTF-8').split('=')
if 'B-V' in HGline[2] and 'G' in HGline[1]:
try:
H = float(HGline[1].rstrip('G'))
except ValueError:
pass
try:
G = float(HGline[2].rstrip('B-V'))
except ValueError:
pass
if ("Multiple major-bodies match string" in src[idx].decode('UTF-8') or
("Matching small-bodies" in src[idx].decode('UTF-8') and not
"No matches found" in src[idx+1].decode('UTF-8'))):
raise ValueError('Ambiguous target name; check URL: %s' %
url)
if ("Matching small-bodies" in src[idx].decode('UTF-8') and
"No matches found" in src[idx+1].decode('UTF-8')):
raise ValueError('Unknown target; check URL: %s' % url)
# field identification for each line in
elements = []
for line in datablock:
line = line.split(',')
this_el = []
fieldnames = []
datatypes = []
# create a dictionary for each date (each line)
for idx, item in enumerate(headerline):
if (item.find('JDTDB') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('datetime_jd')
datatypes.append(np.float64)
if (item.find('EC') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('e')
datatypes.append(np.float64)
if (item.find('QR') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('p')
datatypes.append(np.float64)
if (item.find('A') > -1) and len(item.strip()) == 1:
this_el.append(np.float64(line[idx]))
fieldnames.append('a')
datatypes.append(np.float64)
if (item.find('IN') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('incl')
datatypes.append(np.float64)
if (item.find('OM') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('node')
datatypes.append(np.float64)
if (item.find('W') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('argper')
datatypes.append(np.float64)
if (item.find('Tp') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('Tp')
datatypes.append(np.float64)
if (item.find('MA') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('meananomaly')
datatypes.append(np.float64)
if (item.find('TA') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('trueanomaly')
datatypes.append(np.float64)
if (item.find('PR') > -1):
# Earth years
this_el.append(np.float64(line[idx])/(365.256))
fieldnames.append('period')
datatypes.append(np.float64)
if (item.find('AD') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('Q')
datatypes.append(np.float64)
# append targetname
this_el.append(targetname)
fieldnames.append('targetname')
datatypes.append(object)
# append H
this_el.append(H)
fieldnames.append('H')
datatypes.append(np.float64)
# append G
this_el.append(G)
fieldnames.append('G')
datatypes.append(np.float64)
if len(this_el) > 0:
elements.append(tuple(this_el))
if len(elements) == 0:
return 0
# combine elements with column names and data types into ndarray
assert len(elements[0]) == len(fieldnames) == len(datatypes)
self.data = np.array(elements,
dtype=[(str(fieldnames[i]), datatypes[i]) for i
in range(len(fieldnames))])
return len(self)
def export2pyephem(self, center='500@10', equinox=2000.):
"""Call JPL HORIZONS website to obtain orbital elements based on the
provided targetname, epochs, and center code and create a
PyEphem (http://rhodesmill.org/pyephem/) object. This function
requires PyEphem to be installed.
:param center: str;
center body (default: 500@10 = Sun)
:param equinox: float;
equinox (default: 2000.0)
:result: list;
list of PyEphem objects, one per epoch
:example: >>> import callhorizons
>>> import numpy
>>> import ephem
>>>
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
>>> ceres_pyephem = ceres.export2pyephem()
>>>
>>> nau = ephem.Observer() # setup observer site
>>> nau.lon = -111.653152/180.*numpy.pi
>>> nau.lat = 35.184108/180.*numpy.pi
>>> nau.elevation = 2100 # m
>>> nau.date = '2015/10/5 01:23' # UT
>>> print ('next rising: %s' % nau.next_rising(ceres_pyephem[0]))
>>> print ('next transit: %s' % nau.next_transit(ceres_pyephem[0]))
>>> print ('next setting: %s' % nau.next_setting(ceres_pyephem[0]))
"""
try:
import ephem
except ImportError:
raise ImportError(
'export2pyephem requires PyEphem to be installed')
# obtain orbital elements
self.get_elements(center)
objects = []
for el in self.data:
n = 0.9856076686/np.sqrt(el['a']**3) # mean daily motion
epoch_djd = el['datetime_jd']-2415020.0 # Dublin Julian date
epoch = ephem.date(epoch_djd)
epoch_str = "%d/%f/%d" % (epoch.triple()[1], epoch.triple()[2],
epoch.triple()[0])
# export to PyEphem
objects.append(ephem.readdb("%s,e,%f,%f,%f,%f,%f,%f,%f,%s,%i,%f,%f" %
(el['targetname'], el['incl'], el['node'],
el['argper'], el['a'], n, el['e'],
el['meananomaly'], epoch_str, equinox,
el['H'], el['G'])))
return objects
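

if __name__ == '__main__':
    # Minimal usage sketch: it simply chains the calls documented in the
    # query class above. Observatory code 568 (Mauna Kea) is the code used
    # in the docstring examples; any Minor Planet Center code works.
    # Requires network access to the JPL HORIZONS service.
    ceres = query('Ceres')
    ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
    n_epochs = ceres.get_ephemerides(568)
    print('%d epochs queried' % n_epochs)
    print(ceres['datetime'], ceres['RA'], ceres['DEC'])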
| mit | 2,673,770,013,249,938,400 | 44.952055 | 90 | 0.409781 | false |
JulyKikuAkita/PythonPrac | cs15211/SoupServings.py | 1 | 5860 | __source__ = 'https://leetcode.com/problems/soup-servings/'
# Time: O(N^2)
# Space: O(N^2)
#
# Description: Leetcode # 808. Soup Servings
#
# There are two types of soup: type A and type B.
# Initially we have N ml of each type of soup. There are four kinds of operations:
#
# Serve 100 ml of soup A and 0 ml of soup B
# Serve 75 ml of soup A and 25 ml of soup B
# Serve 50 ml of soup A and 50 ml of soup B
# Serve 25 ml of soup A and 75 ml of soup B
#
# When we serve some soup, we give it to someone and we no longer have it.
# Each turn, we will choose from the four operations with equal probability 0.25.
# If the remaining volume of soup is not enough to complete the operation,
# we will serve as much as we can.
# We stop once we no longer have some quantity of both types of soup.
#
# Note that we do not have the operation where all 100 ml's of soup B are used first.
#
# Return the probability that soup A will be empty first,
# plus half the probability that A and B become empty at the same time.
#
#
# Example:
# Input: N = 50
# Output: 0.625
# Explanation:
# If we choose the first two operations, A will become empty first. For the third operation,
# A and B will become empty at the same time. For the fourth operation, B will become empty first.
# So the total probability of A becoming empty first plus half the probability that
# A and B become empty at the same time, is 0.25 * (1 + 1 + 0.5 + 0) = 0.625.
#
# Notes:
#
# 0 <= N <= 10^9.
# Answers within 10^-6 of the true value will be accepted as correct.
#
import unittest
# 60ms 21.28%
class Solution(object):
def soupServings(self, N):
"""
:type N: int
:rtype: float
"""
Q, R = divmod(N, 25)
N = Q + (R > 0)
if N >= 500: return 1
memo = {}
def dp(x, y):
if (x, y) not in memo:
if x <= 0 or y <= 0:
ans = 0.5 if x<=0 and y<=0 else 1.0 if x<=0 else 0.0
else:
ans = 0.25 * (dp(x-4,y)+dp(x-3,y-1)+dp(x-2,y-2)+dp(x-1,y-3))
memo[x, y] = ans
return memo[x, y]
return dp(N, N)
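# Illustrative checks against the statement above: Solution().soupServings(50)
# returns 0.625 (the worked example), and the answer approaches 1.0 as N grows,
# which is why the code short-circuits once the scaled N reaches 500.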
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/soup-servings/solution/
#
Note: The most important hint in this problem for me is
"Answers within 10^-6 of the true value will be accepted as correct."
When my DP timed out or hit a runtime error,
I printed the result of each call for each N.
The results are monotonically increasing and get closer and closer to 1.
If you print them out, you will observe that once N is as large as 5551,
the gap between the result and 1 is less than 10^-6.
Approach #1: Dynamic Programming [Accepted]
Complexity Analysis
Time Complexity: O(1). (There exists a constant C such that the algorithm never performs more than C steps.)
Space Complexity: O(1) (There exists a constant C such that the algorithm never uses more than C space.)
# 7ms 56.04%
class Solution {
public double soupServings(int N) {
N = N / 25 + ( N % 25 > 0 ? 1: 0);
if (N > 500) return 1.0;
double[][] memo = new double[N + 1][N + 1];
for (int s = 0; s <= 2 * N; s++) {
for (int i = 0; i <= N; i++) {
int j = s - i;
if (j < 0 || j > N) continue;
double ans = 0.0;
if (i == 0) ans = 1.0;
if (i == 0 && j == 0) ans = 0.5;
if (i > 0 && j > 0) {
ans = 0.25 * (memo[M(i-4)][j] + memo[M(i-3)][M(j-1)] +
memo[M(i-2)][M(j-2)] + memo[M(i-1)][M(j-3)]);
}
memo[i][j] = ans;
}
}
return memo[N][N];
}
private int M(int x) {
return Math.max(0, x);
}
}
# DFS + pruning
# 7ms 56.04%
class Solution {
public double soupServings(int N) {
if (N > 5000) return 1.0; // see Note
return helper(N, N, new Double[N + 1][N + 1]);
}
private double helper(int A, int B, Double[][] memo) {
if (A <= 0 && B <= 0) return 0.5; // base case 1
if (A <= 0) return 1.0; // base case 2
if (B <= 0) return 0.0; // base case 3
if (memo[A][B] != null) return memo[A][B];
int[] serveA = {100, 75, 50, 25};
int[] serveB = {0, 25, 50, 75};
memo[A][B] = 0.0;
for (int i = 0; i < 4; i++) {
memo[A][B] += helper(A - serveA[i], B - serveB[i], memo);
}
return memo[A][B] *= 0.25;
}
}
# 2ms 100%
class Solution {
public double soupServings(int N) {
int n = N;
if (n >= 10000) {
            return 1; // A is served faster than B on average, so for large N the answer is effectively 1.
}
if (n % 25 != 0) {
n = n / 25 + 1;
} else {
n = n / 25;
}
int[][] options = new int[][]{{4, 0}, {3, 1}, {2, 2}, {1, 3}};
return dfs(n, n, options, new Double[n + 1][n + 1]);
}
private double dfs(int soupA, int soupB, int[][]options, Double[][] mem) {
if (soupA <= 0) {
if (soupB > 0) {
return 1;
}
if (soupB <= 0) {
return 0.5;
}
}
if (soupB <= 0) {
return 0;
}
if (mem[soupA][soupB] != null) {
return mem[soupA][soupB];
}
// 4 options
double res = 0.0;
for (int i = 0; i < options.length; i++) {
int nextA = soupA - options[i][0];
int nextB = soupB - options[i][1];
res += 0.25 * dfs(nextA, nextB, options, mem);
}
mem[soupA][soupB] = res;
return res;
}
}
'''
| apache-2.0 | -7,564,282,484,889,298,000 | 31.197802 | 108 | 0.523038 | false |
torkelsson/meta-package-manager | meta_package_manager/managers/mas.py | 1 | 5319 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2018 Kevin Deldycke <[email protected]>
# and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import re
from boltons.cacheutils import cachedproperty
from ..base import PackageManager
from ..platform import MACOS
class MAS(PackageManager):
platforms = frozenset([MACOS])
# 'mas outdated' output has been changed in 1.3.1: https://github.com
# /mas-cli/mas/commit/ca72ee42b1c5f482513b1d2fbf780b0bf3d9618b
requirement = '>= 1.3.1'
name = "Mac AppStore"
def get_version(self):
""" Fetch version from ``mas version`` output."""
return self.run([self.cli_path, 'version'])
@cachedproperty
def installed(self):
""" Fetch installed packages from ``mas list`` output.
Raw CLI output samples:
.. code-block:: shell-session
$ mas list
408981434 iMovie (10.1.4)
747648890 Telegram (2.30)
"""
installed = {}
output = self.run([self.cli_path] + self.cli_args + ['list'])
if output:
regexp = re.compile(r'(\d+) (.*) \((\S+)\)$')
for package in output.split('\n'):
match = regexp.match(package)
if match:
package_id, package_name, installed_version = \
match.groups()
installed[package_id] = {
'id': package_id,
'name': package_name,
# Normalize unknown version. See:
# https://github.com/mas-cli/mas/commit
# /1859eaedf49f6a1ebefe8c8d71ec653732674341
'installed_version': (
installed_version if installed_version != 'unknown'
else None)}
return installed
def search(self, query):
""" Fetch matching packages from ``mas search`` output.
Raw CLI output samples:
.. code-block:: shell-session
$ mas search python
689176796 Python Runner
630736088 Learning Python
945397020 Run Python
891162632 Python Lint
1025391371 Tutorial for Python
1164498373 PythonGames
"""
matches = {}
output = self.run([self.cli_path] + self.cli_args + [
'search', query])
if output:
regexp = re.compile(r'(\d+) (.*)$')
for package in output.split('\n'):
match = regexp.match(package)
if match:
package_id, package_name = match.groups()
matches[package_id] = {
'id': package_id,
'name': package_name,
'latest_version': None,
'exact': self.exact_match(query, package_name)}
return matches
@cachedproperty
def outdated(self):
""" Fetch outdated packages from ``mas outdated`` output.
Raw CLI output samples:
.. code-block:: shell-session
$ mas outdated
.. todo
An example of ``mas outdated`` output is missing above.
"""
outdated = {}
output = self.run([self.cli_path] + self.cli_args + ['outdated'])
if output:
regexp = re.compile(r'(\d+) (.*) \((\S+) -> (\S+)\)$')
for package in output.split('\n'):
match = regexp.match(package)
if match:
package_id, package_name, installed_version, \
latest_version = match.groups()
outdated[package_id] = {
'id': package_id,
'name': package_name,
'latest_version': latest_version,
# Normalize unknown version. See:
# https://github.com/mas-cli/mas/commit
# /1859eaedf49f6a1ebefe8c8d71ec653732674341
'installed_version': (
installed_version if installed_version != 'unknown'
else None)}
return outdated
def upgrade_cli(self, package_id=None):
cmd = [self.cli_path] + self.cli_args + ['upgrade']
if package_id:
cmd.append(package_id)
return cmd
def upgrade_all_cli(self):
return self.upgrade_cli()
| gpl-2.0 | -8,493,799,302,312,990,000 | 31.432927 | 79 | 0.536943 | false |
apuigsech/CryptoAPI | CryptoAPI/CryptoAPI.py | 1 | 8197 | #!/usr/bin/env python
# CryptoAPI: Python Crypto API implementation
#
# Copyright (c) 2014 - Albert Puigsech Galicia ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from CryptsyAPI import CryptsyAPI
from BittrexAPI import BittrexAPI
class CryptoAPI_iface(object):
def balances(self, currency=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def marketstatus(self, market=None, depth_level=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def orders(self, market=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
raise NotImplementedError( "Method not implemented" )
def delorder(self, order_id=None, simulation=None):
raise NotImplementedError( "Method not implemented" )
class CryptoAPI_cryptsy(CryptsyAPI, CryptoAPI_iface):
def __init__(self, key, secret, simulation=False, cached=False):
super(CryptoAPI_cryptsy, self).__init__(key, secret, simulation, cached)
CryptoAPI_iface.__init__(self)
def balances(self, currency=None, cached=None):
if cached == None:
cached = self.cached
ret = {
'available': {},
'hold': {},
'total': {},
}
info = self.getinfo(cached)['return']
for i in info['balances_available']:
if i == currency or (currency == None and (float(info['balances_available'][i]) > 0 or info['balances_hold'].has_key(i))):
ret['available'][i] = float(info['balances_available'][i])
ret['hold'][i] = float(info['balances_hold'][i]) if info['balances_hold'].has_key(i) else float(0)
ret['total'][i] = ret['available'][i] + ret['hold'][i]
return ret
def marketstatus(self, market=None, depth_level=None, cached=None):
if cached == None:
cached = self.cached
status = self.getmarkets(cached)['return']
ret = {}
for i in status:
marketname = '{0}-{1}'.format(i['secondary_currency_code'], i['primary_currency_code'])
if marketname == market or i['primary_currency_code'] == market or i['secondary_currency_code'] == market or market == None:
ret[marketname] = {
'id': int(i['marketid']),
'last_price': float(i['last_trade']),
'high_price': float(i['high_trade']),
'low_price': float(i['low_trade']),
'volume': float(i['current_volume']),
'depth': None
}
if depth_level != None and depth_level > 0:
depth = self.depth(i['marketid'], cached)['return']
ret[marketname]['depth'] = {
'buy': [],
'sell': [],
}
for j in depth['buy'][0:depth_level]:
ret[marketname]['depth']['buy'].append([float(j[0]),float(j[1])])
for j in depth['sell'][0:depth_level]:
ret[marketname]['depth']['sell'].append([float(j[0]),float(j[1])])
return ret
def orders(self, market=None, cached=None):
if cached == None:
cached = self.cached
orders = self.allmyorders(cached)['return']
ret = []
for i in orders:
marketname = self._getmarketfromid(i['marketid'])
ret.append({
'id': int(i['orderid']),
'market': 'TBD',
'price': i['price'],
'amount': i['orig_quantity'],
'remaining_amount': i['quantity'],
})
return ret
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
if simulation == None:
simulation = self.simulation
status = self.marketstatus(market, 1)
print status
if pricetype == 'market':
price = 4294967296
elif pricetype == 'best':
if type == 'buy':
price = status[market]['depth']['sell'][0][0]
elif type == 'sell':
price = status[market]['depth']['buy'][0][0]
elif pricetype == 'border' or pricetype == 'overboder':
if type == 'buy':
price = status[market]['depth']['buy'][0][0]
elif type == 'sell':
price = status[market]['depth']['sell'][0][0]
if pricetype == 'overboder':
if type == 'buy':
price += 0.00000001
elif type == 'sell':
price -= 0.00000001
return self.createorder(status[market]['id'], type, amount, price)
def delorder(self, order_id=None, simulation=None):
return None
def _getmarketfromid(self, id):
markets = self.marketstatus(cached=True)
for marketname in markets:
if markets[marketname]['id'] == id:
return marketname
return None
def _getidfrommarket(self, market):
markets = self.marketstatus(cached=True)
if markets.has_key(market):
return markets[market]['id']
else:
return None
class CryptoAPI_bittrex(BittrexAPI, CryptoAPI_iface):
def __init__(self, key, secret, simulation=False, cached=False):
super(CryptoAPI_bittrex, self).__init__(key, secret, simulation, cached)
def balances(self, currency=None, cached=None):
if cached == None:
cached = self.cached
ret = {
'available': {},
'hold': {},
'total': {},
}
if currency==None:
info = self.getbalances(cached)['result']
else:
pass
info = [self.getbalance(currency, cached)['result']]
for i in info:
ret['available'][i['Currency']] = float(i['Available'])
ret['hold'][i['Currency']] = float(i['Pending'])
ret['total'][i['Currency']] = float(i['Balance'])
return ret
def marketstatus(self, market=None, depth_level=None, cached=None):
if cached == None:
cached = self.cached
ret = {}
status = self.getmarkets(cached)['result']
status = self.getmarketsummaries(cached)['result']
for i in status:
marketname = i['MarketName']
#if marketname == market or market == i['BaseCurrency'] or market == i['MarketCurrency'] or market == None:
if marketname == market or market in marketname or market == None:
if i['Volume'] == None:
i['Volume'] = 0
ret[marketname] = {
'id': marketname,
'last_price': float(i['Last']),
'high_price': float(str(i['High'])), # FIX a bug on Bittrex data returned
'low_price': float(i['Low']),
'volume': float(i['Volume']),
'depth': None
}
if depth_level != None and depth_level > 0:
depth = self.getorderbook(marketname, 'both', depth_level, cached)['result']
ret[marketname]['depth'] = {
'buy': [],
'sell': [],
}
for j in depth['buy'][0:depth_level]:
ret[marketname]['depth']['buy'].append([float(j['Rate']),float(j['Quantity'])])
for j in depth['sell'][0:depth_level]:
ret[marketname]['depth']['sell'].append([float(j['Rate']),float(j['Quantity'])])
return ret
def orders(self, market=None, cached=None):
if cached == None:
cached = self.cached
ret = []
orders = self.getopenorders(market, cached)['return']
for i in orders:
marketname = self._getmarketfromid(i['marketid'])
ret.append({
'id': int(i['orderid']),
'market': 'TBD',
'price': i['price'],
'amount': i['orig_quantity'],
'remaining_amount': i['quantity'],
})
return ret
pass
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
pass
def delorder(self, order_id=None, simulation=None):
pass
def CryptoAPI(type, key, secret, simulation=False, cached=False):
# TODO Security: type validation
code = 'CryptoAPI_{0}(key, secret, simulation, cached)'.format(type)
api = eval(code)
return api | gpl-3.0 | 8,747,078,000,745,754,000 | 27.866197 | 127 | 0.654508 | false |
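# Example usage (sketch): the backend name selects one of the CryptoAPI_* classes via
# eval, so only 'cryptsy' or 'bittrex' are valid here; the key/secret values below are
# placeholders, not real credentials.
#   api = CryptoAPI('bittrex', 'MY_KEY', 'MY_SECRET', simulation=True, cached=True)
#   print api.marketstatus('BTC-LTC', depth_level=3)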
jbq/ufw | src/backend.py | 1 | 24402 | '''backend.py: interface for ufw backends'''
#
# Copyright 2008-2011 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import errno
import os
import re
import stat
import sys
import ufw.util
from ufw.util import warn, debug
from ufw.common import UFWError, config_dir, iptables_dir, UFWRule
import ufw.applications
class UFWBackend:
'''Interface for backends'''
def __init__(self, name, dryrun, extra_files=None):
self.defaults = None
self.name = name
self.dryrun = dryrun
self.rules = []
self.rules6 = []
self.files = {'defaults': os.path.join(config_dir, 'default/ufw'),
'conf': os.path.join(config_dir, 'ufw/ufw.conf'),
'apps': os.path.join(config_dir, 'ufw/applications.d') }
if extra_files != None:
self.files.update(extra_files)
self.loglevels = {'off': 0,
'low': 100,
'medium': 200,
'high': 300,
'full': 400 }
self.do_checks = True
try:
self._do_checks()
self._get_defaults()
self._read_rules()
except Exception:
raise
self.profiles = ufw.applications.get_profiles(self.files['apps'])
self.iptables = os.path.join(iptables_dir, "iptables")
self.iptables_restore = os.path.join(iptables_dir, "iptables-restore")
self.ip6tables = os.path.join(iptables_dir, "ip6tables")
self.ip6tables_restore = os.path.join(iptables_dir, \
"ip6tables-restore")
self.iptables_version = ufw.util.get_iptables_version(self.iptables)
def is_enabled(self):
'''Is firewall configured as enabled'''
if self.defaults.has_key('enabled') and \
self.defaults['enabled'] == 'yes':
return True
return False
def use_ipv6(self):
'''Is firewall configured to use IPv6'''
if self.defaults.has_key('ipv6') and \
self.defaults['ipv6'] == 'yes' and \
os.path.exists("/proc/sys/net/ipv6"):
return True
return False
def _get_default_policy(self, primary="input"):
'''Get default policy for specified primary chain'''
policy = "default_" + primary + "_policy"
rstr = ""
if self.defaults[policy] == "accept":
rstr = "allow"
elif self.defaults[policy] == "accept_no_track":
rstr = "allow-without-tracking"
elif self.defaults[policy] == "reject":
rstr = "reject"
else:
rstr = "deny"
return rstr
def _do_checks(self):
'''Perform basic security checks:
is setuid or setgid (for non-Linux systems)
checks that script is owned by root
checks that every component in absolute path are owned by root
warn if script is group writable
warn if part of script path is group writable
Doing this at the beginning causes a race condition with later
operations that don't do these checks. However, if the user running
this script is root, then need to be root to exploit the race
condition (and you are hosed anyway...)
'''
if not self.do_checks:
err_msg = _("Checks disabled")
warn(err_msg)
return True
# Not needed on Linux, but who knows the places we will go...
if os.getuid() != os.geteuid():
err_msg = _("ERROR: this script should not be SUID")
raise UFWError(err_msg)
if os.getgid() != os.getegid():
err_msg = _("ERROR: this script should not be SGID")
raise UFWError(err_msg)
uid = os.getuid()
if uid != 0:
err_msg = _("You need to be root to run this script")
raise UFWError(err_msg)
# Use these so we only warn once
warned_world_write = {}
warned_group_write = {}
warned_owner = {}
profiles = []
if not os.path.isdir(self.files['apps']):
warn_msg = _("'%s' does not exist") % (self.files['apps'])
warn(warn_msg)
else:
pat = re.compile(r'^\.')
for profile in os.listdir(self.files['apps']):
if not pat.search(profile):
profiles.append(os.path.join(self.files['apps'], profile))
for path in self.files.values() + [ os.path.abspath(sys.argv[0]) ] + \
profiles:
while True:
debug("Checking " + path)
if path == self.files['apps'] and \
not os.path.isdir(self.files['apps']):
break
try:
statinfo = os.stat(path)
mode = statinfo[stat.ST_MODE]
except OSError:
err_msg = _("Couldn't stat '%s'") % (path)
raise UFWError(err_msg)
except Exception:
raise
if statinfo.st_uid != 0 and not warned_owner.has_key(path):
warn_msg = _("uid is %(uid)s but '%(path)s' is owned by " \
"%(st_uid)s") % ({'uid': str(uid), \
'path': path, \
'st_uid': str(statinfo.st_uid)})
warn(warn_msg)
warned_owner[path] = True
if mode & stat.S_IWOTH and not warned_world_write.has_key(path):
warn_msg = _("%s is world writable!") % (path)
warn(warn_msg)
warned_world_write[path] = True
if mode & stat.S_IWGRP and not warned_group_write.has_key(path):
warn_msg = _("%s is group writable!") % (path)
warn(warn_msg)
warned_group_write[path] = True
if path == "/":
break
path = os.path.dirname(path)
if not path:
raise OSError(errno.ENOENT, "Could not find '%s'" % (path))
for f in self.files:
if f != 'apps' and not os.path.isfile(self.files[f]):
err_msg = _("'%(f)s' file '%(name)s' does not exist") % \
({'f': f, 'name': self.files[f]})
raise UFWError(err_msg)
def _get_defaults(self):
'''Get all settings from defaults file'''
self.defaults = {}
for f in [self.files['defaults'], self.files['conf']]:
try:
orig = ufw.util.open_file_read(f)
except Exception:
err_msg = _("Couldn't open '%s' for reading") % (f)
raise UFWError(err_msg)
pat = re.compile(r'^\w+="?\w+"?')
for line in orig:
if pat.search(line):
tmp = re.split(r'=', line.strip())
self.defaults[tmp[0].lower()] = tmp[1].lower().strip('"\'')
orig.close()
# do some default policy sanity checking
policies = ['accept', 'accept_no_track', 'drop', 'reject']
for c in [ 'input', 'output', 'forward' ]:
if not self.defaults.has_key('default_%s_policy' % (c)):
err_msg = _("Missing policy for '%s'" % (c))
raise UFWError(err_msg)
p = self.defaults['default_%s_policy' % (c)]
if p not in policies or \
(p == 'accept_no_track' and c == 'forward'):
err_msg = _("Invalid policy '%(policy)s' for '%(chain)s'" % \
({'policy': p, 'chain': c}))
raise UFWError(err_msg)
def set_default(self, fn, opt, value):
'''Sets option in defaults file'''
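        # For example, set_default(self.files['conf'], "LOGLEVEL", "low") rewrites an
        # existing LOGLEVEL= line in the config file, or appends one if it is missing.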
if not re.match(r'^[\w_]+$', opt):
err_msg = _("Invalid option")
raise UFWError(err_msg)
# Perform this here so we can present a nice error to the user rather
# than a traceback
if not os.access(fn, os.W_OK):
err_msg = _("'%s' is not writable" % (fn))
raise UFWError(err_msg)
try:
fns = ufw.util.open_files(fn)
except Exception:
raise
fd = fns['tmp']
found = False
pat = re.compile(r'^' + opt + '=')
for line in fns['orig']:
if pat.search(line):
ufw.util.write_to_file(fd, opt + "=" + value + "\n")
found = True
else:
ufw.util.write_to_file(fd, line)
# Add the entry if not found
if not found:
ufw.util.write_to_file(fd, opt + "=" + value + "\n")
try:
ufw.util.close_files(fns)
except Exception:
raise
# Now that the files are written out, update value in memory
self.defaults[opt.lower()] = value.lower().strip('"\'')
def set_default_application_policy(self, policy):
'''Sets default application policy of firewall'''
if not self.dryrun:
if policy == "allow":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"ACCEPT\"")
except Exception:
raise
elif policy == "deny":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"DROP\"")
except Exception:
raise
elif policy == "reject":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"REJECT\"")
except Exception:
raise
elif policy == "skip":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"SKIP\"")
except Exception:
raise
else:
err_msg = _("Unsupported policy '%s'") % (policy)
raise UFWError(err_msg)
rstr = _("Default application policy changed to '%s'") % (policy)
return rstr
def get_app_rules_from_template(self, template):
'''Return a list of UFWRules based on the template rule'''
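        # For example (assuming an application profile 'OpenSSH' listing '22/tcp'), a
        # template with dapp='OpenSSH' expands below into one rule per port/protocol
        # entry of that profile; profiles may appear on the dst side, src side or both.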
rules = []
profile_names = self.profiles.keys()
if template.dport in profile_names and template.sport in profile_names:
dports = ufw.applications.get_ports(self.profiles[template.dport])
sports = ufw.applications.get_ports(self.profiles[template.sport])
for i in dports:
tmp = template.dup_rule()
tmp.dapp = ""
tmp.set_port("any", "src")
try:
(port, proto) = ufw.util.parse_port_proto(i)
tmp.set_protocol(proto)
tmp.set_port(port, "dst")
except Exception:
raise
tmp.dapp = template.dapp
if template.dport == template.sport:
# Just use the same ports as dst for src when they are the
# same to avoid duplicate rules
tmp.sapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(i)
tmp.set_protocol(proto)
tmp.set_port(port, "src")
except Exception:
raise
tmp.sapp = template.sapp
rules.append(tmp)
else:
for j in sports:
rule = tmp.dup_rule()
rule.sapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(j)
rule.set_protocol(proto)
rule.set_port(port, "src")
except Exception:
raise
if rule.protocol == "any":
rule.set_protocol(tmp.protocol)
rule.sapp = template.sapp
rules.append(rule)
elif template.sport in profile_names:
for p in ufw.applications.get_ports(self.profiles[template.sport]):
rule = template.dup_rule()
rule.sapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(p)
rule.set_protocol(proto)
rule.set_port(port, "src")
except Exception:
raise
rule.sapp = template.sapp
rules.append(rule)
elif template.dport in profile_names:
for p in ufw.applications.get_ports(self.profiles[template.dport]):
rule = template.dup_rule()
rule.dapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(p)
rule.set_protocol(proto)
rule.set_port(port, "dst")
except Exception:
raise
rule.dapp = template.dapp
rules.append(rule)
if len(rules) < 1:
err_msg = _("No rules found for application profile")
raise UFWError(err_msg)
return rules
def update_app_rule(self, profile):
'''Update rule for profile in place. Returns result string and bool
on whether or not the profile is used in the current ruleset.
'''
updated_rules = []
updated_rules6 = []
last_tuple = ""
rstr = ""
updated_profile = False
# Remember, self.rules is from user[6].rules, and not the running
# firewall.
for r in self.rules + self.rules6:
if r.dapp == profile or r.sapp == profile:
# We assume that the rules are in app rule order. Specifically,
# if app rule has multiple rules, they are one after the other.
# If the rule ordering changes, the below will have to change.
tupl = r.get_app_tuple()
if tupl == last_tuple:
# Skip the rule if seen this tuple already (ie, it is part
# of a known tuple).
continue
else:
# Have a new tuple, so find and insert new app rules here
template = r.dup_rule()
template.set_protocol("any")
if template.dapp != "":
template.set_port(template.dapp, "dst")
if template.sapp != "":
template.set_port(template.sapp, "src")
try:
new_app_rules = self.get_app_rules_from_template(\
template)
except Exception:
raise
for new_r in new_app_rules:
new_r.normalize()
if new_r.v6:
updated_rules6.append(new_r)
else:
updated_rules.append(new_r)
last_tuple = tupl
updated_profile = True
else:
if r.v6:
updated_rules6.append(r)
else:
updated_rules.append(r)
if updated_profile:
self.rules = updated_rules
self.rules6 = updated_rules6
rstr += _("Rules updated for profile '%s'") % (profile)
try:
self._write_rules(False) # ipv4
self._write_rules(True) # ipv6
except Exception:
err_msg = _("Couldn't update application rules")
raise UFWError(err_msg)
return (rstr, updated_profile)
def find_application_name(self, profile_name):
'''Find the application profile name for profile_name'''
if self.profiles.has_key(profile_name):
return profile_name
match = ""
matches = 0
for n in self.profiles.keys():
if n.lower() == profile_name.lower():
match = n
matches += 1
debug_msg = "'%d' matches for '%s'" % (matches, profile_name)
debug(debug_msg)
if matches == 1:
return match
elif matches > 1:
err_msg = _("Found multiple matches for '%s'. Please use exact profile name") % \
(profile_name)
err_msg = _("Could not find a profile matching '%s'") % (profile_name)
raise UFWError(err_msg)
def find_other_position(self, position, v6):
'''Return the absolute position in the other list of the rule with the
user position of the given list. For example, find_other_position(4,
True) will return the absolute position of the rule in the ipv4 list
matching the user specified '4' rule in the ipv6 list.
'''
# Invalid search (v6 rule with too low position)
if v6 and position > len(self.rules6):
raise ValueError()
# Invalid search (v4 rule with too high position)
if not v6 and position > len(self.rules):
raise ValueError()
if position < 1:
raise ValueError()
rules = []
if v6:
rules = self.rules6
else:
rules = self.rules
# self.rules[6] is a list of tuples. Some application rules have
# multiple tuples but the user specifies by ufw rule, not application
# tuple, so we need to find how many tuples there are leading up to
# the specified position, which we can then use as an offset for
# getting the proper match_rule.
app_rules = {}
tuple_offset = 0
for i, r in enumerate(rules):
if i >= position:
break
tupl = ""
if r.dapp != "" or r.sapp != "":
tupl = r.get_app_tuple()
if app_rules.has_key(tupl):
tuple_offset += 1
else:
app_rules[tupl] = True
rules = []
if v6:
rules = self.rules
match_rule = self.rules6[position - 1 + tuple_offset].dup_rule()
match_rule.set_v6(False)
else:
rules = self.rules6
match_rule = self.rules[position - 1 + tuple_offset].dup_rule()
match_rule.set_v6(True)
count = 1
for r in rules:
if UFWRule.match(r, match_rule) == 0:
return count
count += 1
return 0
def get_loglevel(self):
'''Gets current log level of firewall'''
level = 0
rstr = _("Logging: ")
if not self.defaults.has_key('loglevel') or \
self.defaults['loglevel'] not in self.loglevels.keys():
level = -1
rstr += _("unknown")
else:
level = self.loglevels[self.defaults['loglevel']]
if level == 0:
rstr += "off"
else:
rstr += "on (%s)" % (self.defaults['loglevel'])
return (level, rstr)
def set_loglevel(self, level):
'''Sets log level of firewall'''
if level not in self.loglevels.keys() + ['on']:
err_msg = _("Invalid log level '%s'") % (level)
raise UFWError(err_msg)
new_level = level
if level == "on":
if not self.defaults.has_key('loglevel') or \
self.defaults['loglevel'] == "off":
new_level = "low"
else:
new_level = self.defaults['loglevel']
try:
self.set_default(self.files['conf'], "LOGLEVEL", new_level)
self.update_logging(new_level)
except Exception:
raise
if new_level == "off":
return _("Logging disabled")
else:
return _("Logging enabled")
def get_rules(self):
'''Return list of all rules'''
return self.rules + self.rules6
def get_rules_count(self, v6):
'''Return number of ufw rules (not iptables rules)'''
rules = []
if v6:
rules = self.rules6
else:
rules = self.rules
count = 0
app_rules = {}
for r in rules:
tupl = ""
if r.dapp != "" or r.sapp != "":
tupl = r.get_app_tuple()
if app_rules.has_key(tupl):
debug("Skipping found tuple '%s'" % (tupl))
continue
else:
app_rules[tupl] = True
count += 1
return count
def get_rule_by_number(self, num):
'''Return rule specified by number seen via "status numbered"'''
rules = self.get_rules()
count = 1
app_rules = {}
for r in rules:
tupl = ""
if r.dapp != "" or r.sapp != "":
tupl = r.get_app_tuple()
if app_rules.has_key(tupl):
debug("Skipping found tuple '%s'" % (tupl))
continue
else:
app_rules[tupl] = True
if count == int(num):
return r
count += 1
return None
def get_matching(self, rule):
'''See if there is a matching rule in the existing ruleset. Note this
does not group rules by tuples.'''
matched = []
count = 0
for r in self.get_rules():
count += 1
ret = rule.fuzzy_dst_match(r)
if ret < 1:
matched.append(count)
return matched
# API overrides
def set_default_policy(self, policy, direction):
'''Set default policy for specified direction'''
raise UFWError("UFWBackend.set_default_policy: need to override")
def get_running_raw(self, rules_type):
'''Get status of running firewall'''
raise UFWError("UFWBackend.get_running_raw: need to override")
def get_status(self, verbose, show_count):
'''Get managed rules'''
raise UFWError("UFWBackend.get_status: need to override")
def set_rule(self, rule, allow_reload):
'''Update firewall with rule'''
raise UFWError("UFWBackend.set_rule: need to override")
def start_firewall(self):
'''Start the firewall'''
raise UFWError("UFWBackend.start_firewall: need to override")
def stop_firewall(self):
'''Stop the firewall'''
raise UFWError("UFWBackend.stop_firewall: need to override")
def get_app_rules_from_system(self, template, v6):
'''Get a list if rules based on template'''
raise UFWError("UFWBackend.get_app_rules_from_system: need to " + \
"override")
def update_logging(self, level):
'''Update loglevel of running firewall'''
raise UFWError("UFWBackend.update_logging: need to override")
def reset(self):
'''Reset the firewall'''
raise UFWError("UFWBackend.reset: need to override")
| gpl-3.0 | 7,539,033,067,947,464,000 | 35.475336 | 93 | 0.489017 | false |
TUB-Control/PaPI | papi/plugin/io/UDP_Plugin/UDP_Plugin.py | 1 | 26844 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Technische Universität Berlin,
Fakultät IV - Elektrotechnik und Informatik,
Fachgebiet Regelungssysteme,
Einsteinufer 17, D-10587 Berlin, Germany
This file is part of PaPI.
PaPI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
PaPI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PaPI. If not, see <http://www.gnu.org/licenses/>.
Contributors
Christian Klauer
Stefan Ruppin
"""
from papi.plugin.base_classes.iop_base import iop_base
from papi.data.DPlugin import DBlock
from papi.data.DSignal import DSignal
from papi.data.DParameter import DParameter
import numpy as np
import threading
import os
import sys
import socket
import ast
import struct
import json
import time
import pickle
import base64
from threading import Timer
from socketIO_client import SocketIO, LoggingNamespace
class OptionalObject(object):
def __init__(self, ORTD_par_id, nvalues):
self.ORTD_par_id = ORTD_par_id
self.nvalues = nvalues
self.sendOnReceivePort = True
self.UseSocketIO = True
class UDP_Plugin(iop_base):
def cb_get_plugin_configuration(self):
config = {
'address': {
'value': '127.0.0.1',
'advanced': 'Connection',
'tooltip': 'IP address of the source',
'display_text': 'Target IP address'
},
'source_port': {
'value': '20000',
'advanced': 'Connection',
'tooltip': 'Port of incoming data',
'display_text': 'Source Port'
},
'out_port': {
'value': '20001',
'advanced': 'Connection',
'tooltip': 'Port for outgoing data',
'display_text': 'Send Port'
},
'SeparateSignals': {
'value': '0',
'advanced': 'General',
'tooltip': 'Split up signal vectors to separate signals',
'display_text': 'Separate Signals'
},
'SendOnReceivePort': {
'value': '0',
'advanced': 'Connection',
'display_text': 'Same port for send and receive',
'tooltip': 'Use the source port to send data back to the target'
},
"UseSocketIO" : {
'value' : '0',
'advanced' : 'SocketIO',
'tooltip' : 'Use socket.io connection to node.js target-server',
'display_text': 'Use SocketIO',
'type' : 'bool'
},
'socketio_port': {
'value': '8091',
'advanced': 'SocketIO',
'tooltip': 'Port for the SocketIO Connection',
'display_text': 'SocketIO Port'
},
"OnlyInitialConfig" : {
'value' :'0',
'tooltip' : 'Use only first configuration, ignore further configurations.',
'type' : 'bool',
'advanced': 'General'
}
}
return config
def cb_initialize_plugin(self):
print('ORTD', self.__id__, ':process id', os.getpid())
self.config = self.pl_get_current_config_ref()
# open UDP
self.HOST = self.config['address']['value']
self.SOURCE_PORT = int(self.config['source_port']['value'])
self.OUT_PORT = int(self.config['out_port']['value'])
self.LOCALBIND_HOST = '' # config['source_address']['value'] #CK
self.sendOnReceivePort = True if self.config['SendOnReceivePort']['value'] == '1' else False
self.UseSocketIO = True if self.config['UseSocketIO']['value'] == '1' else False
if self.UseSocketIO:
self.SocketIOPort = int(self.config['socketio_port']['value'])
#self.sendOnReceivePort = True # NOTE: remove this
#self.UseSocketIO = True # NOTE: remove this
print ("SendOnReceivePort = ", self.sendOnReceivePort)
print ("UseSocketIO = ", self.UseSocketIO)
self.PAPI_SIMULINK_BLOCK = False
self.separate = int(self.config['SeparateSignals']['value'])
self.onlyInitialConfig = self.config['OnlyInitialConfig']['value'] == '1'
self.hasInitialConfig = False
if (not self.sendOnReceivePort):
# SOCK_DGRAM is the socket type to use for UDP sockets
self.sock_parameter = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_parameter.setblocking(1)
self.ControlBlock = DBlock('ControllerSignals')
self.ControlBlock.add_signal(DSignal('ControlSignalReset'))
self.ControlBlock.add_signal(DSignal('ControlSignalCreate'))
self.ControlBlock.add_signal(DSignal('ControlSignalSub'))
self.ControlBlock.add_signal(DSignal('ControllerSignalParameter'))
self.ControlBlock.add_signal(DSignal('ControllerSignalClose'))
self.ControlBlock.add_signal(DSignal('ActiveTab'))
self.pl_send_new_block_list([self.ControlBlock])
self.t = 0
self.pl_set_event_trigger_mode(True)
if not self.UseSocketIO:
self.sock_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if (not self.sendOnReceivePort):
try:
self.sock_recv.bind((self.LOCALBIND_HOST, self.SOURCE_PORT)) # CK
print("UDP_Plugin-plugin listening on: ", self.LOCALBIND_HOST, ":", self.SOURCE_PORT) #CK
except socket.error as msg:
sys.stderr.write("[ERROR] Can't start UDP_Plugin due to %s\n" % msg)
return False
else:
print ("---- Using client UDP mode (not binding to a port) ----")
self.sock_recv.settimeout(1)
self.thread_goOn = True
self.lock = threading.Lock()
self.thread = threading.Thread(target=self.thread_execute)
self.thread.start()
if self.UseSocketIO:
print ("Using socket.io connection on port", self.SocketIOPort)
self.thread_socket_goOn = True
self.thread_socketio = threading.Thread(target=self.thread_socketio_execute)
self.blocks = {}
self.Sources = {}
self.parameters = {}
self.signal_values = {}
self.block_id = 0
self.config_complete = False
self.config_buffer = {}
self.timer = Timer(3,self.callback_timeout_timer)
self.timer_active = False
self.ConsoleBlock = DBlock('ConsoleSignals')
self.ConsoleBlock.add_signal(DSignal('MainSignal'))
self.pl_send_new_block_list([self.ConsoleBlock])
self.consoleIn = DParameter('consoleIn',default='')
self.pl_send_new_parameter_list([self.consoleIn])
if self.UseSocketIO:
self.thread_socketio.start()
return True
def SIO_callback_SCISTOUT(self, data):
# Got a chunk of data from a console interface in the target server
self.sio_count += 1
self.pl_send_new_data(self.ConsoleBlock.name, [self.sio_count], {'MainSignal':data['Data']})
def SIO_callback_PAPICONFIG(self, data):
# Got a new PaPI plugin configuration in JSON-format via socket.io connection
# currently the config is transmitted via ORTD packets that may also be encapsulated into socket.io
# added 12.8.15, CK
print ("Got a new config in JSON/socket.io format")
print (data)
self.check_and_process_cfg(data)
# TODO: Test this
def SIO_callback_ORTDPACKET(self, data):
# Got an encapsulated packet from ORTD, not transfered via UDP but encapsulated within the socket.io connection
#print ("Got data packet from ORTD via socket.io")
#print (data)
self.process_received_package(base64.b64decode(data)) # data must be a binary blob
def thread_socketio_execute(self):
#self.sio = SocketIO('localhost', 8091, LoggingNamespace)
#self.sio.on('SCISTOUT',self.callback_SCISTOUT)
while True:
try:
with SocketIO(self.HOST, self.SocketIOPort, LoggingNamespace) as self.sio:
self.sio.on('SCISTOUT',self.SIO_callback_SCISTOUT)
self.sio.on('PAPICONFIG',self.SIO_callback_PAPICONFIG)
self.sio.on('ORTDPACKET',self.SIO_callback_ORTDPACKET)
self.request_new_config_from_ORTD()
self.sio_count = 0
while self.thread_socket_goOn:
self.sio.wait(seconds=1)
except:
print("Something failed within socket.io")
def cb_pause(self):
self.lock.acquire()
self.thread_goOn = False
self.lock.release()
self.thread.join()
def cb_resume(self):
self.thread_goOn = True
        self.thread = threading.Thread(target=self.thread_execute)
self.thread.start()
def thread_execute(self):
time.sleep(2) # TODO: Grrr. do not use sleep for thread sync...
#try:
if not self.UseSocketIO:
self.request_new_config_from_ORTD()
goOn = True
newData = False
signal_values = {}
while goOn:
try:
if not self.sendOnReceivePort:
rev = self.sock_recv.recv(65507) # not feasible for network connection other than loopback
else:
#print ("---- Waiting for data ----")
rev, server = self.sock_recv.recvfrom(65507) # not feasible for network connection other than loopback
#print ("---- got ----")
#print (rev)
except socket.timeout:
# print('timeout')
newData = False
except socket.error:
print('ORTD got socket error')
else:
newData = True
if newData:
self.process_received_package(rev)
# check if thread should go on
self.lock.acquire()
goOn = self.thread_goOn
self.lock.release()
# Thread ended
self.sock_recv.close()
def process_received_package(self, rev):
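        # Packet layout as handled below: a header of three little-endian int32 values
        # (SenderId, Counter, SourceId) followed by the payload. Special SourceId values:
        # >= 0 data stream, -1 group of values finished, -2 new ORTD config available,
        # -4 chunk of a JSON config, -100 data stream from the PaPI-Simulink block.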
SenderId, Counter, SourceId = struct.unpack_from('<iii', rev)
if SourceId == -1 and len(self.blocks) > 0:
# data stream finished
self.process_finished_action(SourceId, rev)
self.signal_values = {}
if SourceId >= 0 and len(self.blocks) > 0:
# got data stream
self.process_data_stream(SourceId, rev)
if SourceId == -2:
# new config in ORTD available
# send trigger to get new config
self.request_new_config_from_ORTD()
self.config_complete = False
self.config_buffer = {}
if SourceId == -100:
self.PAPI_SIMULINK_BLOCK = True
# got data stream from the PaPI-Simulink Block
self.process_papi_data_stream(rev)
if SourceId == -4:
self.PAPI_SIMULINK_BLOCK = False
# new configItem
# print("Part of a new configuration");
# receive new config item and execute cfg in PaPI
# unpack package
i = 16 # Offset: 4 ints
unp = ''
while i < len(rev):
unp = unp + str(struct.unpack_from('<s',rev,i)[0])[2]
i += 1
if int(Counter) not in self.config_buffer:
self.config_buffer[int(Counter)] = unp
else:
if self.config_buffer[int(Counter)] != unp:
self.config_buffer = {}
# Check counter key series for holes
counters = list(self.config_buffer.keys())
counters.sort()
i = 1
config_file = ''
self.config_complete = True
for c in counters:
if i == c:
config_file += self.config_buffer[c]
i += 1
else:
self.config_complete = False
break
if self.config_complete:
if not self.check_and_process_cfg(config_file):
self.start_timeout_timer()
else:
self.stop_timeout_timer()
else:
self.start_timeout_timer()
def start_timeout_timer(self):
if not self.timer_active:
self.timer = Timer(3,self.callback_timeout_timer)
self.timer.start()
self.timer_active = True
else:
self.timer.cancel()
self.timer = Timer(3,self.callback_timeout_timer)
self.timer.start()
def stop_timeout_timer(self):
if self.timer_active:
self.timer.cancel()
self.timer_active = False
#self.config_buffer = {}
def callback_timeout_timer(self):
print('ORTD_PLUGIN: Config timeout, requesting a new config')
self.timer_active = False
self.config_buffer = {}
self.request_new_config_from_ORTD()
def request_new_config_from_ORTD(self):
Counter = 1
data = struct.pack('<iiid', 12, Counter, int(-3), float(0))
if self.UseSocketIO:
print ("Requesting config via socket.io")
self.sio.emit('ORTDPACKET', base64.b64encode(data).decode('ascii') )
# TODO test this
else:
if not self.sendOnReceivePort:
self.sock_parameter.sendto(data, (self.HOST, self.OUT_PORT))
else:
self.sock_recv.sendto(data, (self.HOST, self.SOURCE_PORT))
def check_and_process_cfg(self, config_file):
try:
# config completely received
# extract new configuration
cfg = json.loads(config_file)
ORTDSources, ORTDParameters, plToCreate, \
plToClose, subscriptions, paraConnections, activeTab = self.extract_config_elements(cfg)
if self.hasInitialConfig and self.onlyInitialConfig:
return True
self.hasInitialConfig = True
self.update_block_list(ORTDSources)
self.update_parameter_list(ORTDParameters)
self.process_papi_configuration(plToCreate, plToClose, subscriptions, paraConnections, activeTab)
return True
except ValueError as e:
return False
def process_papi_configuration(self, toCreate, toClose, subs, paraConnections, activeTab):
self.pl_send_new_data('ControllerSignals', [1], {'ControlSignalReset': 1,
'ControlSignalCreate':None,
'ControlSignalSub':None,
'ControllerSignalParameter':None,
'ControllerSignalClose':None,
'ActiveTab': None })
self.pl_send_new_data('ControllerSignals', [1], {'ControlSignalReset':0,
'ControlSignalCreate':toCreate,
'ControlSignalSub':subs,
'ControllerSignalParameter':paraConnections,
'ControllerSignalClose':toClose,
'ActiveTab': activeTab})
def parse_json_stream(self,stream):
decoder = json.JSONDecoder()
while stream:
obj, idx = decoder.raw_decode(stream)
yield obj
stream = stream[idx:].lstrip()
def update_parameter_list(self, ORTDParameter):
newList ={}
for para_id in ORTDParameter:
para_name = ORTDParameter[para_id]['ParameterName']
if para_name in self.parameters:
para_object = self.parameters.pop(para_name)
else:
val_count = int(ORTDParameter[para_id]['NValues'])
opt_object = OptionalObject(para_id, val_count)
if "initial_value" in ORTDParameter[para_id]:
val = ORTDParameter[para_id]['initial_value']
if val_count > 1:
val = val[1:-1]
init_value = list(map(float,val.split(',')))
else:
init_value = float(val)
else:
init_value = 0
para_object = DParameter(para_name, default=str(init_value), OptionalObject=opt_object)
self.pl_send_new_parameter_list([para_object])
newList[para_name] = para_object
toDeleteDict = self.parameters
self.parameters = newList
for par in toDeleteDict:
self.pl_send_delete_parameter(par)
def update_block_list(self,ORTDSources):
#self.block_id = self.block_id +1
#newBlock = DBlock('SourceGroup'+str(self.block_id))
#self.blocks['SourceGroup'+str(self.block_id)] = newBlock
if 'SourceGroup0' in self.blocks:
self.pl_send_delete_block('SourceGroup0')
newBlock = DBlock('SourceGroup0')
self.blocks['SourceGroup0'] = newBlock
self.Sources = ORTDSources
keys = list(self.Sources.keys())
for key in keys:
Source = self.Sources[key]
sig_name = Source['SourceName']
newBlock.add_signal(DSignal(sig_name))
self.pl_send_new_block_list([newBlock])
# Remove BLOCKS
#if 'SourceGroup'+str(self.block_id-1) in self.blocks:
#self.pl_send_delete_block(self.blocks.pop('SourceGroup'+str(self.block_id-1)).name)
def process_papi_data_stream(self, rev):
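        # Layout assumed below: a 16 byte header (4 int32 values), then for every known
        # source a little-endian int32 signal id followed by NValues_send doubles.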
timestamp = None
offset = 4*4
for i in range(len(self.Sources)):
try:
val = []
# offset += i*(4+4+4)
# Get current signal ID:
# signal_id,data = struct.unpack_from('<id', rev, offset)
signal_id, data = struct.unpack_from('<id', rev, offset)
# print('Offset=' + str(offset))
#
# print('SignalID: ' + str(signal_id))
# print('Data: ' + str(data))
if str(signal_id) in self.Sources:
Source = self.Sources[str(signal_id)]
NValues = int(Source['NValues_send'])
# print("NValues : " + str(NValues))
#print("Offset:" + str(offset))
offset += 4
for n in range(NValues):
# print('#Value=' + str(n))
# print('Offset=' + str(offset))
try:
data = struct.unpack_from('<d', rev, offset)[0]
# print('Data=' + str(data))
val.append(data)
except struct.error:
# print(sys.exc_info()[0])
# print('!!!! except !!!!')
val.append(0)
offset += 8
# print('Data: ' + str(val))
# if NValues > 1:
# signal_id,data = struct.unpack_from('<id%sd' %NValues, rev, offset)
# offset += (NValues-1)*(4+4)
if self.Sources[str(signal_id)]["SourceName"] == "SourceTime":
timestamp = val[0]
self.signal_values[signal_id] = val
#print("Signal: " + str(signal_id) + " Data: " + str(data) );
except struct.error:
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
print(sys.exc_info()[1])
print("Can't unpack.")
self.process_finished_action(-1,None, timestamp)
def process_data_stream(self, SourceId, rev):
# Received a data packet
# Lookup the Source behind the given SourceId
if str(SourceId) in self.Sources:
Source = self.Sources[str(SourceId)]
NValues = int(Source['NValues_send'])
# Read NVales from the received packet
val = []
for i in range(NValues):
try:
val.append(struct.unpack_from('<d', rev, 3 * 4 + i * 8)[0])
except:
val.append(0)
self.signal_values[SourceId] = val
else:
dp_info = self.pl_get_dplugin_info()
print('ORTD_PLUGIN - '+dp_info.uname+': received data with an unknown id ('+str(SourceId)+')')
def process_finished_action(self, SourceId, rev, timestamp=None):
if SourceId == -1:
# unpack group ID
# GroupId = struct.unpack_from('<i', rev, 3 * 4)[0]
self.t += 1.0
keys = list(self.signal_values.keys())
            keys.sort() # REMARK: sort the keys list only once, at initialization
signals_to_send = {}
for key in keys:
if str(key) in self.Sources:
sig_name = self.Sources[str(key)]['SourceName']
signals_to_send[sig_name] = self.signal_values[key]
if len( list(self.blocks.keys()) ) >0:
block = list(self.blocks.keys())[0]
if len(self.blocks[block].signals) == len(signals_to_send)+1:
if timestamp is None:
self.pl_send_new_data(block, [self.t], signals_to_send )
else:
self.pl_send_new_data(block, [timestamp], signals_to_send )
def cb_execute(self, Data=None, block_name = None, plugin_uname = None):
raise Exception('Should not be called!')
def cb_set_parameter(self, name, value):
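        # An ORTD parameter update is packed as three little-endian int32 values
        # (sender id 12, a counter, the ORTD parameter id) followed by the new values
        # as doubles, and sent via socket.io or UDP depending on the configuration.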
if name in self.parameters:
parameter = self.parameters[name]
Pid = parameter.OptionalObject.ORTD_par_id
Counter = 111
if value is not None:
data = None
# get values in float from string
valueCast = ast.literal_eval(value)
# check is it is a list, if not, cast to list
if not isinstance(valueCast,list):
valueCast = [valueCast]
if self.PAPI_SIMULINK_BLOCK:
data = struct.pack('<iii%sd' %len(valueCast), 12, Counter, int(Pid),*valueCast)
else:
data = struct.pack('<iii%sd' %len(valueCast), 12, Counter, int(Pid),*valueCast)
#if isinstance(valueCast, list):
#data += struct.pack('%sd' %len(valueCast),*valueCast)
# for i in range(0,len(valueCast)):
# data += struct.pack('<d',valueCast[i])
#else:
# data += struct.pack('d',float(value))
if self.UseSocketIO:
print ("Setting parameter via socket.io")
self.sio.emit('ORTDPACKET', base64.b64encode(data).decode('ascii') )
# TODO test this
else:
if not self.sendOnReceivePort:
self.sock_parameter.sendto(data, (self.HOST, self.OUT_PORT))
else:
self.sock_recv.sendto(data, (self.HOST, self.SOURCE_PORT))
else:
if name == 'consoleIn' and self.UseSocketIO:
self.sio.emit('ConsoleCommand', { 'ConsoleId' : '1' , 'Data' : value })
def cb_quit(self):
self.lock.acquire()
self.thread_goOn = False
self.lock.release()
self.thread.join()
if not self.sendOnReceivePort:
self.sock_parameter.close()
print('ORTD-Plugin will quit')
def cb_plugin_meta_updated(self):
pass
def plconf(self):
cfg = {}
subs = {}
paras = {}
close = {}
if 'PaPIConfig' in self.ProtocolConfig:
if 'ToCreate' in self.ProtocolConfig['PaPIConfig']:
cfg = self.ProtocolConfig['PaPIConfig']['ToCreate']
if 'ToSub' in self.ProtocolConfig['PaPIConfig']:
subs = self.ProtocolConfig['PaPIConfig']['ToSub']
if 'ToControl' in self.ProtocolConfig['PaPIConfig']:
paras = self.ProtocolConfig['PaPIConfig']['ToControl']
if 'ToClose' in self.ProtocolConfig['PaPIConfig']:
close = self.ProtocolConfig['PaPIConfig']['ToClose']
return cfg, subs, paras, close
def extract_config_elements(self, configuration):
plToCreate = {}
subscriptions = {}
paraConnections = {}
plToClose = {}
ORTDSources = {}
ORTDParameters = {}
activeTab = 'PaPI-Tab'
if 'PaPIConfig' in configuration:
if 'ToCreate' in configuration['PaPIConfig']:
plToCreate = configuration['PaPIConfig']['ToCreate']
if 'ToSub' in configuration['PaPIConfig']:
subscriptions = configuration['PaPIConfig']['ToSub']
if 'ToControl' in configuration['PaPIConfig']:
paraConnections = configuration['PaPIConfig']['ToControl']
if 'ToClose' in configuration['PaPIConfig']:
plToClose = configuration['PaPIConfig']['ToClose']
if 'ActiveTab' in configuration['PaPIConfig']:
activeTab = configuration['PaPIConfig']['tab']
if 'SourcesConfig' in configuration:
ORTDSources = configuration['SourcesConfig']
if 'ParametersConfig' in configuration:
ORTDParameters = configuration['ParametersConfig']
return ORTDSources, ORTDParameters, plToCreate, plToClose, subscriptions, paraConnections, activeTab
| gpl-3.0 | -5,428,349,017,113,714,000 | 33.814527 | 123 | 0.535914 | false |
craigderington/studentloan5 | tests/engine.py | 1 | 5088 | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("studentloan5", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("studentloan5", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
| bsd-3-clause | -3,658,331,897,922,075,600 | 33.378378 | 114 | 0.612225 | false |
equitania/myodoo-addons-v10 | eq_project/models/__init__.py | 1 | 1068 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo Addon, Open Source Management Solution
# Copyright (C) 2014-now Equitania Software GmbH(<http://www.equitania.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import eq_project_extension
import eq_extension_project_task_type
| agpl-3.0 | 3,027,832,211,828,780,000 | 47.545455 | 79 | 0.620787 | false |
opnsense/core | src/opnsense/scripts/netflow/lib/flowparser.py | 1 | 8475 | """
Copyright (c) 2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
flowd log parser
"""
import struct
import syslog
from socket import inet_ntop, AF_INET, AF_INET6, ntohl
class FlowParser:
# fields in order of appearance, use bitmask compare
field_definition_order = [
'tag',
'recv_time',
'proto_flags_tos',
'agent_addr4',
'agent_addr6',
'src_addr4',
'src_addr6',
'dst_addr4',
'dst_addr6',
'gateway_addr4',
'gateway_addr6',
'srcdst_port',
'packets',
'octets',
'if_indices',
'agent_info',
'flow_times',
'as_info',
'flow_engine_info'
]
# extract definition, integer values are read as rawdata (not parsed)
field_definition = {
'tag': 'I',
'recv_time': '>II',
'proto_flags_tos': 'BBBB',
'agent_addr4': 4,
'agent_addr6': 16,
'src_addr4': 4,
'src_addr6': 16,
'dst_addr4': 4,
'dst_addr6': 16,
'gateway_addr4': 4,
'gateway_addr6': 16,
'srcdst_port': '>HH',
'packets': '>Q',
'octets': '>Q',
'if_indices': '>II',
'agent_info': '>IIIHH',
'flow_times': '>II',
'as_info': 'IIBBH',
'flow_engine_info': 'HHII'
}
def __init__(self, filename, recv_stamp=None):
self._filename = filename
self._recv_stamp = recv_stamp
# cache formatter vs byte length
self._fmt_cache = dict()
# pre-calculate powers of 2
self._pow = dict()
for idx in range(len(self.field_definition_order)):
self._pow[idx] = pow(2, idx)
def calculate_size(self, fmt):
if fmt not in self._fmt_cache:
fmts = {'B': 1, 'H': 2, 'I': 4, 'Q': 8}
self._fmt_cache[fmt] = 0
for key in fmt:
if key in fmts:
self._fmt_cache[fmt] += fmts[key]
return self._fmt_cache[fmt]
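    # Worked example (added note): calculate_size('>IIIHH') sums 4 + 4 + 4 + 2 + 2
    # = 16 bytes, the packed length of the 'agent_info' field defined above.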
def _parse_binary(self, raw_data, data_fields):
""" parse binary record
:param raw_data: binary data record
:param data_fields: field bitmask, provided by header
:return: dict
"""
raw_data_idx = 0
raw_record = dict()
for idx in range(len(self.field_definition_order)):
if self._pow[idx] & data_fields:
fieldname = self.field_definition_order[idx]
if fieldname in self.field_definition:
if type(self.field_definition[fieldname]) is int:
fsize = self.field_definition[fieldname]
raw_record[fieldname] = raw_data[raw_data_idx:raw_data_idx + fsize]
else:
fsize = self.calculate_size(self.field_definition[fieldname])
try:
content = struct.unpack(
self.field_definition[fieldname],
raw_data[raw_data_idx:raw_data_idx + fsize]
)
raw_record[fieldname] = content[0] if len(content) == 1 else content
except struct.error as e:
# the flowd record doesn't appear to be as expected, log for now.
syslog.syslog(syslog.LOG_NOTICE, "flowparser failed to unpack %s (%s)" % (fieldname, e))
raw_data_idx += fsize
return raw_record
def __iter__(self):
""" iterate flowd log file
:return:
"""
# pre-compile address formatters to save time
with open(self._filename, 'rb') as flowh:
while True:
# header [version, len_words, reserved, fields]
hdata = flowh.read(8)
if hdata == b'':
break
header = struct.unpack('BBHI', hdata)
record = self._parse_binary(
raw_data=flowh.read(header[1] * 4),
data_fields=ntohl(header[3])
)
if 'recv_time' not in record or 'agent_info' not in record:
# XXX invalid (empty?) flow record.
continue
record['recv_sec'] = record['recv_time'][0]
if self._recv_stamp is not None and record['recv_sec'] < self._recv_stamp:
# self._recv_stamp can contain the last received timestamp, in which case
# we should not return older data. The exact timestamp will be returned, so the
# consumer knows it doesn't have to read other, older, flowd log files
continue
record['sys_uptime_ms'] = record['agent_info'][0]
record['netflow_ver'] = record['agent_info'][3]
record['recv'] = record['recv_sec']
record['recv_usec'] = record['recv_time'][1]
record['if_ndx_in'] = -1
record['if_ndx_out'] = -1
record['src_port'] = 0
record['dst_port'] = 0
record['protocol'] = 0
if 'proto_flags_tos' in record:
record['tcp_flags'] = record['proto_flags_tos'][0]
record['protocol'] = record['proto_flags_tos'][1]
record['tos'] = record['proto_flags_tos'][2]
if 'flow_times' in record:
record['flow_start'] = record['flow_times'][0]
record['flow_finish'] = record['flow_times'][1]
else:
record['flow_start'] = record['sys_uptime_ms']
record['flow_finish'] = record['sys_uptime_ms']
if 'if_indices' in record:
record['if_ndx_in'] = record['if_indices'][0]
record['if_ndx_out'] = record['if_indices'][1]
if 'srcdst_port' in record:
record['src_port'] = record['srcdst_port'][0]
record['dst_port'] = record['srcdst_port'][1]
# concat ipv4/v6 fields into field without [4,6]
for key in self.field_definition_order:
if key in record:
if key[-1] == '4':
record[key[:-1]] = inet_ntop(AF_INET, record[key])
elif key[-1] == '6':
record[key[:-1]] = inet_ntop(AF_INET6, record[key])
# calculated values
record['flow_end'] = record['recv_sec'] - (record['sys_uptime_ms'] - record['flow_finish']) / 1000.0
record['duration_ms'] = (record['flow_finish'] - record['flow_start'])
record['flow_start'] = record['flow_end'] - record['duration_ms'] / 1000.0
if 'packets' not in record or 'octets' not in record or 'src_addr' not in record or 'dst_addr' not in record:
# this can't be useful data, skip record
continue
yield record
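# Usage sketch (not part of the original module; the log path is an assumption):
#
#   for record in FlowParser('/var/log/flowd.log'):
#       print(record['recv'], record['src_addr'], record['dst_addr'], record['octets'])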
| bsd-2-clause | 991,364,815,688,320,300 | 41.80303 | 125 | 0.520354 | false |
paulbersch/django-locus | locus/utils/location.py | 1 | 1360 | import math
# add back later
# import GeoIP
nauticalMilePerLat = 60.00721
nauticalMilePerLongitude = 60.10793
rad = math.pi / 180.0
milesPerNauticalMile = 1.15078
def calcDistance(lat1, lon1, lat2, lon2):
"""
Caclulate distance between two lat lons in NM
"""
lat1 = float(lat1)
lat2 = float(lat2)
lon1 = float(lon1)
lon2 = float(lon2)
yDistance = (lat2 - lat1) * nauticalMilePerLat
xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2)
distance = math.sqrt( yDistance**2 + xDistance**2 )
return distance * milesPerNauticalMile
def milesBox( lat, lon, radius ):
"""
Returns two lat/lon pairs as (lat1, lon2, lat2, lon2) which define a box that exactly surrounds
a circle of radius of the given amount in miles.
"""
# this gives us a tuple of values that can easily be used to get a list of "possibly close"
# dealers. then we use the calcDistance function to check if it's ACTUALLY within the radius.
latRange = radius / ( milesPerNauticalMile * 60.0 )
lonRange = radius / ( math.cos(lat * rad) * milesPerNauticalMile * 60.0)
return ( lat - latRange, lon - lonRange, lat + latRange, lon + lonRange )
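# Usage sketch (coordinates and radius are made-up assumptions): first take the
# cheap bounding box, then confirm candidates with the exact distance check.
#
#   lat, lon = 40.7128, -74.0060
#   min_lat, min_lon, max_lat, max_lon = milesBox(lat, lon, 10)
#   is_close = calcDistance(lat, lon, 40.73, -73.99) <= 10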
def revLookup(ip):
return False
"""
gi = GeoIP.open("/usr/local/share/GeoIP/GeoLiteCity.dat",GeoIP.GEOIP_STANDARD)
return gi.record_by_addr(ip)
"""
| mit | 4,176,555,150,693,318,000 | 28.565217 | 107 | 0.696324 | false |
MinoMino/minqlx | python/minqlx/_handlers.py | 1 | 18535 | # minqlx - Extends Quake Live's dedicated server with extra functionality and scripting.
# Copyright (C) 2015 Mino <[email protected]>
# This file is part of minqlx.
# minqlx is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# minqlx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with minqlx. If not, see <http://www.gnu.org/licenses/>.
import minqlx
import collections
import sched
import re
# ====================================================================
# REGULAR EXPRESSIONS
# ====================================================================
_re_say = re.compile(r"^say +\"?(?P<msg>.+)\"?$", flags=re.IGNORECASE)
_re_say_team = re.compile(r"^say_team +\"?(?P<msg>.+)\"?$", flags=re.IGNORECASE)
_re_callvote = re.compile(r"^(?:cv|callvote) +(?P<cmd>[^ ]+)(?: \"?(?P<args>.+?)\"?)?$", flags=re.IGNORECASE)
_re_vote = re.compile(r"^vote +(?P<arg>.)", flags=re.IGNORECASE)
_re_team = re.compile(r"^team +(?P<arg>.)", flags=re.IGNORECASE)
_re_vote_ended = re.compile(r"^print \"Vote (?P<result>passed|failed).\n\"$")
_re_userinfo = re.compile(r"^userinfo \"(?P<vars>.+)\"$")
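# Example (added sketch): _re_callvote.match("callvote map campgrounds") yields
# group("cmd") == "map" and group("args") == "campgrounds".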
# ====================================================================
# LOW-LEVEL HANDLERS
# These are all called by the C code, not within Python.
# ====================================================================
def handle_rcon(cmd):
"""Console commands that are to be processed as regular pyminqlx
commands as if the owner executes it. This allows the owner to
interact with the Python part of minqlx without having to connect.
"""
try:
minqlx.COMMANDS.handle_input(minqlx.RconDummyPlayer(), cmd, minqlx.CONSOLE_CHANNEL)
except:
minqlx.log_exception()
return True
def handle_client_command(client_id, cmd):
"""Client commands are commands such as "say", "say_team", "scores",
"disconnect" and so on. This function parses those and passes it
on to the event dispatcher.
:param client_id: The client identifier.
:type client_id: int
:param cmd: The command being ran by the client.
:type cmd: str
"""
try:
# Dispatch the "client_command" event before further processing.
player = minqlx.Player(client_id)
retval = minqlx.EVENT_DISPATCHERS["client_command"].dispatch(player, cmd)
if retval is False:
return False
elif isinstance(retval, str):
# Allow plugins to modify the command before passing it on.
cmd = retval
res = _re_say.match(cmd)
if res:
msg = res.group("msg").replace("\"", "")
channel = minqlx.CHAT_CHANNEL
if minqlx.EVENT_DISPATCHERS["chat"].dispatch(player, msg, channel) is False:
return False
return cmd
res = _re_say_team.match(cmd)
if res:
msg = res.group("msg").replace("\"", "")
if player.team == "free": # I haven't tried this, but I don't think it's even possible.
channel = minqlx.FREE_CHAT_CHANNEL
elif player.team == "red":
channel = minqlx.RED_TEAM_CHAT_CHANNEL
elif player.team == "blue":
channel = minqlx.BLUE_TEAM_CHAT_CHANNEL
else:
channel = minqlx.SPECTATOR_CHAT_CHANNEL
if minqlx.EVENT_DISPATCHERS["chat"].dispatch(player, msg, channel) is False:
return False
return cmd
res = _re_callvote.match(cmd)
if res and not minqlx.Plugin.is_vote_active():
vote = res.group("cmd")
args = res.group("args") if res.group("args") else ""
# Set the caller for vote_started in case the vote goes through.
minqlx.EVENT_DISPATCHERS["vote_started"].caller(player)
if minqlx.EVENT_DISPATCHERS["vote_called"].dispatch(player, vote, args) is False:
return False
return cmd
res = _re_vote.match(cmd)
if res and minqlx.Plugin.is_vote_active():
arg = res.group("arg").lower()
if arg == "y" or arg == "1":
if minqlx.EVENT_DISPATCHERS["vote"].dispatch(player, True) is False:
return False
elif arg == "n" or arg == "2":
if minqlx.EVENT_DISPATCHERS["vote"].dispatch(player, False) is False:
return False
return cmd
res = _re_team.match(cmd)
if res:
arg = res.group("arg").lower()
target_team = ""
if arg == player.team[0]:
# Don't trigger if player is joining the same team.
return cmd
elif arg == "f":
target_team = "free"
elif arg == "r":
target_team = "red"
elif arg == "b":
target_team = "blue"
elif arg == "s":
target_team = "spectator"
elif arg == "a":
target_team = "any"
if target_team:
if minqlx.EVENT_DISPATCHERS["team_switch_attempt"].dispatch(player, player.team, target_team) is False:
return False
return cmd
res = _re_userinfo.match(cmd)
if res:
new_info = minqlx.parse_variables(res.group("vars"), ordered=True)
old_info = player.cvars
changed = {}
for key in new_info:
if key not in old_info or (key in old_info and new_info[key] != old_info[key]):
changed[key] = new_info[key]
if changed:
ret = minqlx.EVENT_DISPATCHERS["userinfo"].dispatch(player, changed)
if ret is False:
return False
elif isinstance(ret, dict):
for key in ret:
new_info[key] = ret[key]
cmd = "userinfo \"{}\"".format("".join(["\\{}\\{}".format(key, new_info[key]) for key in new_info]))
return cmd
except:
minqlx.log_exception()
return True
def handle_server_command(client_id, cmd):
try:
# Dispatch the "server_command" event before further processing.
try:
player = minqlx.Player(client_id) if client_id >= 0 else None
except minqlx.NonexistentPlayerError:
return True
retval = minqlx.EVENT_DISPATCHERS["server_command"].dispatch(player, cmd)
if retval is False:
return False
elif isinstance(retval, str):
cmd = retval
res = _re_vote_ended.match(cmd)
if res:
if res.group("result") == "passed":
minqlx.EVENT_DISPATCHERS["vote_ended"].dispatch(True)
else:
minqlx.EVENT_DISPATCHERS["vote_ended"].dispatch(False)
return cmd
except:
minqlx.log_exception()
return True
# Executing tasks right before a frame, by the main thread, will often be desirable to avoid
# weird behavior if you were to use threading. This list will act as a task queue.
# Tasks can be added by simply adding the @minqlx.next_frame decorator to functions.
frame_tasks = sched.scheduler()
next_frame_tasks = collections.deque()
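# Sketch of how the queue is used (the decorated function below is hypothetical):
# @minqlx.next_frame appends (func, args, kwargs) to next_frame_tasks, and
# handle_frame() moves those entries into frame_tasks so they run on the main thread.
#
#   @minqlx.next_frame
#   def delayed_center_print(player, msg):
#       player.center_print(msg)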
def handle_frame():
"""This will be called every frame. To allow threads to call stuff from the
main thread, tasks can be scheduled using the :func:`minqlx.next_frame` decorator
and have it be executed here.
"""
while True:
# This will run all tasks that are currently scheduled.
# If one of the tasks throw an exception, it'll log it
# and continue execution of the next tasks if any.
try:
frame_tasks.run(blocking=False)
break
except:
minqlx.log_exception()
continue
try:
minqlx.EVENT_DISPATCHERS["frame"].dispatch()
except:
minqlx.log_exception()
return True
try:
while True:
func, args, kwargs = next_frame_tasks.popleft()
frame_tasks.enter(0, 0, func, args, kwargs)
except IndexError:
pass
_zmq_warning_issued = False
_first_game = True
_ad_round_number = 0
def handle_new_game(is_restart):
# This is called early in the launch process, so it's a good place to initialize
# minqlx stuff that needs QLDS to be initialized.
global _first_game
if _first_game:
minqlx.late_init()
_first_game = False
# A good place to warn the owner if ZMQ stats are disabled.
global _zmq_warning_issued
if not bool(int(minqlx.get_cvar("zmq_stats_enable"))) and not _zmq_warning_issued:
logger = minqlx.get_logger()
logger.warning("Some events will not work because ZMQ stats is not enabled. "
"Launch the server with \"zmq_stats_enable 1\"")
_zmq_warning_issued = True
minqlx.set_map_subtitles()
if not is_restart:
try:
minqlx.EVENT_DISPATCHERS["map"].dispatch(
minqlx.get_cvar("mapname"),
minqlx.get_cvar("g_factory"))
except:
minqlx.log_exception()
return True
try:
minqlx.EVENT_DISPATCHERS["new_game"].dispatch()
except:
minqlx.log_exception()
return True
def handle_set_configstring(index, value):
"""Called whenever the server tries to set a configstring. Can return
False to stop the event.
"""
global _ad_round_number
try:
res = minqlx.EVENT_DISPATCHERS["set_configstring"].dispatch(index, value)
if res is False:
return False
elif isinstance(res, str):
value = res
# VOTES
if index == 9 and value:
cmd = value.split()
vote = cmd[0] if cmd else ""
args = " ".join(cmd[1:]) if len(cmd) > 1 else ""
minqlx.EVENT_DISPATCHERS["vote_started"].dispatch(vote, args)
return
# GAME STATE CHANGES
elif index == 0:
old_cs = minqlx.parse_variables(minqlx.get_configstring(index))
if not old_cs:
return
new_cs = minqlx.parse_variables(value)
old_state = old_cs["g_gameState"]
new_state = new_cs["g_gameState"]
if old_state != new_state:
if old_state == "PRE_GAME" and new_state == "IN_PROGRESS":
pass
elif old_state == "PRE_GAME" and new_state == "COUNT_DOWN":
_ad_round_number = 1
minqlx.EVENT_DISPATCHERS["game_countdown"].dispatch()
elif old_state == "COUNT_DOWN" and new_state == "IN_PROGRESS":
pass
#minqlx.EVENT_DISPATCHERS["game_start"].dispatch()
elif old_state == "IN_PROGRESS" and new_state == "PRE_GAME":
pass
elif old_state == "COUNT_DOWN" and new_state == "PRE_GAME":
pass
else:
logger = minqlx.get_logger()
logger.warning("UNKNOWN GAME STATES: {} - {}".format(old_state, new_state))
# ROUND COUNTDOWN AND START
elif index == 661:
cvars = minqlx.parse_variables(value)
if cvars:
if "turn" in cvars:
# it is A&D
if int(cvars["state"]) == 0:
return
# round cvar appears only on round countdown
# and first round is 0, not 1
try:
round_number = int(cvars["round"]) * 2 + 1 + int(cvars["turn"])
_ad_round_number = round_number
except KeyError:
round_number = _ad_round_number
else:
# it is CA
round_number = int(cvars["round"])
if round_number and "time" in cvars:
minqlx.EVENT_DISPATCHERS["round_countdown"].dispatch(round_number)
return
elif round_number:
minqlx.EVENT_DISPATCHERS["round_start"].dispatch(round_number)
return
return res
except:
minqlx.log_exception()
return True
def handle_player_connect(client_id, is_bot):
"""This will be called whenever a player tries to connect. If the dispatcher
returns False, it will not allow the player to connect and instead show them
a message explaining why. The default message is "You are banned from this
server.", but it can be set with :func:`minqlx.set_ban_message`.
:param client_id: The client identifier.
:type client_id: int
:param is_bot: Whether or not the player is a bot.
:type is_bot: bool
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_connect"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_player_loaded(client_id):
"""This will be called whenever a player has connected and finished loading,
meaning it'll go off a bit later than the usual "X connected" messages.
This will not trigger on bots.
:param client_id: The client identifier.
:type client_id: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_loaded"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_player_disconnect(client_id, reason):
"""This will be called whenever a player disconnects.
:param client_id: The client identifier.
:type client_id: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_disconnect"].dispatch(player, reason)
except:
minqlx.log_exception()
return True
def handle_player_spawn(client_id):
"""Called when a player spawns. Note that a spectator going in free spectate mode
makes the client spawn, so you'll want to check for that if you only want "actual"
spawns.
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_spawn"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_kamikaze_use(client_id):
"""This will be called whenever player uses kamikaze item.
:param client_id: The client identifier.
:type client_id: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["kamikaze_use"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_kamikaze_explode(client_id, is_used_on_demand):
"""This will be called whenever kamikaze explodes.
:param client_id: The client identifier.
:type client_id: int
:param is_used_on_demand: Non-zero if kamikaze is used on demand.
:type is_used_on_demand: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["kamikaze_explode"].dispatch(player, True if is_used_on_demand else False)
except:
minqlx.log_exception()
return True
def handle_console_print(text):
"""Called whenever the server prints something to the console and when rcon is used."""
try:
if not text:
return
# Log console output. Removes the need to have stdout logs in addition to minqlx.log.
minqlx.get_logger().debug(text.rstrip("\n"))
res = minqlx.EVENT_DISPATCHERS["console_print"].dispatch(text)
if res is False:
return False
if _print_redirection:
global _print_buffer
_print_buffer += text
if isinstance(res, str):
return res
return text
except:
minqlx.log_exception()
return True
_print_redirection = None
_print_buffer = ""
def redirect_print(channel):
"""Redirects print output to a channel. Useful for commands that execute console commands
and want to redirect the output to the channel instead of letting it go to the console.
To use it, use the return value with the "with" statement.
.. code-block:: python
def cmd_echo(self, player, msg, channel):
with minqlx.redirect_print(channel):
minqlx.console_command("echo {}".format(" ".join(msg)))
"""
class PrintRedirector:
def __init__(self, channel):
if not isinstance(channel, minqlx.AbstractChannel):
raise ValueError("The redirection channel must be an instance of minqlx.AbstractChannel.")
self.channel = channel
def __enter__(self):
global _print_redirection
_print_redirection = self.channel
def __exit__(self, exc_type, exc_val, exc_tb):
global _print_redirection
self.flush()
_print_redirection = None
def flush(self):
global _print_buffer
self.channel.reply(_print_buffer)
_print_buffer = ""
return PrintRedirector(channel)
def register_handlers():
minqlx.register_handler("rcon", handle_rcon)
minqlx.register_handler("client_command", handle_client_command)
minqlx.register_handler("server_command", handle_server_command)
minqlx.register_handler("frame", handle_frame)
minqlx.register_handler("new_game", handle_new_game)
minqlx.register_handler("set_configstring", handle_set_configstring)
minqlx.register_handler("player_connect", handle_player_connect)
minqlx.register_handler("player_loaded", handle_player_loaded)
minqlx.register_handler("player_disconnect", handle_player_disconnect)
minqlx.register_handler("player_spawn", handle_player_spawn)
minqlx.register_handler("console_print", handle_console_print)
minqlx.register_handler("kamikaze_use", handle_kamikaze_use)
minqlx.register_handler("kamikaze_explode", handle_kamikaze_explode)
| gpl-3.0 | 3,269,673,672,761,068,500 | 35.201172 | 120 | 0.580523 | false |
lundjordan/services | src/codecoverage/bot/code_coverage_bot/codecov.py | 1 | 7319 | # -*- coding: utf-8 -*-
import json
import os
import tempfile
from datetime import datetime
from datetime import timedelta
import hglib
import requests
from cli_common.log import get_logger
from cli_common.taskcluster import get_service
from cli_common.utils import ThreadPoolExecutorResult
from code_coverage_bot import chunk_mapping
from code_coverage_bot import grcov
from code_coverage_bot import suite_reports
from code_coverage_bot import taskcluster
from code_coverage_bot import uploader
from code_coverage_bot.artifacts import ArtifactsHandler
from code_coverage_bot.github import GitHubUtils
from code_coverage_bot.notifier import Notifier
from code_coverage_bot.phabricator import PhabricatorUploader
from code_coverage_bot.secrets import secrets
from code_coverage_bot.zero_coverage import ZeroCov
logger = get_logger(__name__)
class CodeCov(object):
def __init__(self, revision, cache_root, client_id, access_token):
# List of test-suite, sorted alphabetically.
# This way, the index of a suite in the array should be stable enough.
self.suites = [
'web-platform-tests',
]
self.cache_root = cache_root
assert os.path.isdir(cache_root), 'Cache root {} is not a dir.'.format(cache_root)
self.repo_dir = os.path.join(cache_root, 'mozilla-central')
temp_dir = tempfile.mkdtemp()
self.artifacts_dir = os.path.join(temp_dir, 'ccov-artifacts')
self.ccov_reports_dir = os.path.join(temp_dir, 'code-coverage-reports')
self.client_id = client_id
self.access_token = access_token
self.index_service = get_service('index', client_id, access_token)
self.githubUtils = GitHubUtils(cache_root, client_id, access_token)
if revision is None:
# Retrieve revision of latest codecov build
self.github_revision = uploader.get_latest_codecov()
self.revision = self.githubUtils.git_to_mercurial(self.github_revision)
self.from_pulse = False
else:
self.github_revision = None
self.revision = revision
self.from_pulse = True
self.notifier = Notifier(self.repo_dir, revision, client_id, access_token)
logger.info('Mercurial revision', revision=self.revision)
task_ids = {
'linux': taskcluster.get_task('mozilla-central', self.revision, 'linux'),
'windows': taskcluster.get_task('mozilla-central', self.revision, 'win'),
'android-test': taskcluster.get_task('mozilla-central', self.revision, 'android-test'),
'android-emulator': taskcluster.get_task('mozilla-central', self.revision, 'android-emulator'),
}
self.artifactsHandler = ArtifactsHandler(task_ids, self.artifacts_dir)
def clone_mozilla_central(self, revision):
shared_dir = self.repo_dir + '-shared'
cmd = hglib.util.cmdbuilder('robustcheckout',
'https://hg.mozilla.org/mozilla-central',
self.repo_dir,
purge=True,
sharebase=shared_dir,
revision=revision,
networkattempts=7)
cmd.insert(0, hglib.HGPATH)
proc = hglib.util.popen(cmd)
out, err = proc.communicate()
if proc.returncode:
raise hglib.error.CommandError(cmd, proc.returncode, out, err)
logger.info('mozilla-central cloned')
def go(self):
if self.from_pulse:
commit_sha = self.githubUtils.mercurial_to_git(self.revision)
try:
uploader.get_codecov(commit_sha)
                logger.warn('Build was already ingested')
return
except requests.exceptions.HTTPError:
pass
with ThreadPoolExecutorResult(max_workers=2) as executor:
# Thread 1 - Download coverage artifacts.
executor.submit(self.artifactsHandler.download_all)
# Thread 2 - Clone mozilla-central.
executor.submit(self.clone_mozilla_central, self.revision)
if self.from_pulse:
self.githubUtils.update_geckodev_repo()
logger.info('GitHub revision', revision=commit_sha)
self.githubUtils.post_github_status(commit_sha)
r = requests.get('https://hg.mozilla.org/mozilla-central/json-rev/%s' % self.revision)
r.raise_for_status()
push_id = r.json()['pushid']
output = grcov.report(
self.artifactsHandler.get(),
source_dir=self.repo_dir,
service_number=push_id,
commit_sha=commit_sha,
token=secrets[secrets.COVERALLS_TOKEN]
)
logger.info('Report generated successfully')
with ThreadPoolExecutorResult(max_workers=2) as executor:
executor.submit(uploader.coveralls, output)
executor.submit(uploader.codecov, output, commit_sha)
logger.info('Upload changeset coverage data to Phabricator')
phabricatorUploader = PhabricatorUploader(self.repo_dir, self.revision)
phabricatorUploader.upload(json.loads(output))
logger.info('Waiting for build to be ingested by Codecov...')
# Wait until the build has been ingested by Codecov.
if uploader.codecov_wait(commit_sha):
logger.info('Build ingested by codecov.io')
self.notifier.notify()
else:
logger.error('codecov.io took too much time to ingest data.')
else:
logger.info('Generating suite reports')
os.makedirs(self.ccov_reports_dir, exist_ok=True)
suite_reports.generate(self.suites, self.artifactsHandler, self.ccov_reports_dir, self.repo_dir)
logger.info('Generating zero coverage reports')
zc = ZeroCov(self.repo_dir)
zc.generate(self.artifactsHandler.get(), self.revision, self.github_revision)
logger.info('Generating chunk mapping')
chunk_mapping.generate(self.repo_dir, self.revision, self.artifactsHandler)
# Index the task in the TaskCluster index at the given revision and as "latest".
# Given that all tasks have the same rank, the latest task that finishes will
# overwrite the "latest" entry.
namespaces = [
'project.releng.services.project.{}.code_coverage_bot.{}'.format(secrets[secrets.APP_CHANNEL], self.revision),
'project.releng.services.project.{}.code_coverage_bot.latest'.format(secrets[secrets.APP_CHANNEL]),
]
for namespace in namespaces:
self.index_service.insertTask(
namespace,
{
'taskId': os.environ['TASK_ID'],
'rank': 0,
'data': {},
'expires': (datetime.utcnow() + timedelta(180)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
}
)
os.chdir(self.ccov_reports_dir)
self.githubUtils.update_codecoveragereports_repo()
| mpl-2.0 | 2,492,991,510,418,303,500 | 39.888268 | 126 | 0.609236 | false |
mediatum/mediatum | utils/hash.py | 1 | 1599 | """
mediatum - a multimedia content repository
Copyright (C) 2007 Arne Seifert <[email protected]>
Copyright (C) 2007 Matthias Kramm <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
import hashlib
from core import db
q = db.query
logg = logging.getLogger(__name__)
def calcChecksum(filename, method):
if os.path.exists(filename):
f = open(filename)
if method == "SHA-1":
h = hashlib.sha1()
else:
h = hashlib.new('ripemd160')
h.update(f.read())
f.close()
return h.hexdigest()
else:
return ""
def calcChecksumFromMetadata(node):
h = hashlib.sha1()
h.update(str(node.id)) # h.update requires string or buffer as argument
h.update(node.getName())
def attributesToString(node):
string = ""
for item in node.attrs.items():
string += item[0] + item[1]
return string
h.update(attributesToString(node))
return h.hexdigest()
| gpl-3.0 | -1,159,093,611,323,354,600 | 27.052632 | 76 | 0.676673 | false |
doismellburning/tox | tests/test_z_cmdline.py | 1 | 20318 | import tox
import py
import pytest
from tox._pytestplugin import ReportExpectMock
try:
import json
except ImportError:
import simplejson as json
pytest_plugins = "pytester"
from tox._cmdline import Session
from tox._config import parseconfig
def test_report_protocol(newconfig):
config = newconfig([], """
[testenv:mypython]
deps=xy
""")
class Popen:
def __init__(self, *args, **kwargs):
pass
def communicate(self):
return "", ""
def wait(self):
pass
session = Session(config, popen=Popen,
Report=ReportExpectMock)
report = session.report
report.expect("using")
venv = session.getvenv("mypython")
venv.update()
report.expect("logpopen")
def test__resolve_pkg(tmpdir, mocksession):
distshare = tmpdir.join("distshare")
spec = distshare.join("pkg123-*")
py.test.raises(tox.exception.MissingDirectory,
'mocksession._resolve_pkg(spec)')
distshare.ensure(dir=1)
py.test.raises(tox.exception.MissingDependency,
'mocksession._resolve_pkg(spec)')
distshare.ensure("pkg123-1.3.5.zip")
p = distshare.ensure("pkg123-1.4.5.zip")
mocksession.report.clear()
result = mocksession._resolve_pkg(spec)
assert result == p
mocksession.report.expect("info", "determin*pkg123*")
distshare.ensure("pkg123-1.4.7dev.zip")
mocksession._clearmocks()
result = mocksession._resolve_pkg(spec)
mocksession.report.expect("warning", "*1.4.7*")
assert result == p
mocksession._clearmocks()
distshare.ensure("pkg123-1.4.5a1.tar.gz")
result = mocksession._resolve_pkg(spec)
assert result == p
def test__resolve_pkg_doubledash(tmpdir, mocksession):
distshare = tmpdir.join("distshare")
p = distshare.ensure("pkg-mine-1.3.0.zip")
res = mocksession._resolve_pkg(distshare.join("pkg-mine*"))
assert res == p
distshare.ensure("pkg-mine-1.3.0a1.zip")
res = mocksession._resolve_pkg(distshare.join("pkg-mine*"))
assert res == p
class TestSession:
def test_make_sdist(self, initproj):
initproj("example123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
'''
})
config = parseconfig([])
session = Session(config)
sdist = session.sdist()
assert sdist.check()
assert sdist.ext == ".zip"
assert sdist == config.distdir.join(sdist.basename)
sdist2 = session.sdist()
assert sdist2 == sdist
sdist.write("hello")
assert sdist.stat().size < 10
sdist_new = Session(config).sdist()
assert sdist_new == sdist
assert sdist_new.stat().size > 10
def test_make_sdist_distshare(self, tmpdir, initproj):
distshare = tmpdir.join("distshare")
initproj("example123-0.6", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[tox]
distshare=%s
''' % distshare
})
config = parseconfig([])
session = Session(config)
sdist = session.sdist()
assert sdist.check()
assert sdist.ext == ".zip"
assert sdist == config.distdir.join(sdist.basename)
sdist_share = config.distshare.join(sdist.basename)
assert sdist_share.check()
assert sdist_share.read("rb") == sdist.read("rb"), (sdist_share, sdist)
def test_log_pcall(self, mocksession):
mocksession.config.logdir.ensure(dir=1)
assert not mocksession.config.logdir.listdir()
action = mocksession.newaction(None, "something")
action.popen(["echo", ])
match = mocksession.report.getnext("logpopen")
assert match[1].outpath.relto(mocksession.config.logdir)
assert match[1].shell == False
def test_summary_status(self, initproj, capfd):
initproj("logexample123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:hello]
[testenv:world]
'''
})
config = parseconfig([])
session = Session(config)
envs = session.venvlist
assert len(envs) == 2
env1, env2 = envs
env1.status = "FAIL XYZ"
assert env1.status
env2.status = 0
assert not env2.status
session._summary()
out, err = capfd.readouterr()
exp = "%s: FAIL XYZ" % env1.envconfig.envname
assert exp in out
exp = "%s: commands succeeded" % env2.envconfig.envname
assert exp in out
def test_getvenv(self, initproj, capfd):
initproj("logexample123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:hello]
[testenv:world]
'''
})
config = parseconfig([])
session = Session(config)
venv1 = session.getvenv("hello")
venv2 = session.getvenv("hello")
assert venv1 is venv2
venv1 = session.getvenv("world")
venv2 = session.getvenv("world")
assert venv1 is venv2
pytest.raises(LookupError, lambda: session.getvenv("qwe"))
# not sure we want this option ATM
def XXX_test_package(cmd, initproj):
initproj("myproj-0.6", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'MANIFEST.in': """
include doc
include myproj
""",
'tox.ini': ''
})
result = cmd.run("tox", "package")
assert not result.ret
result.stdout.fnmatch_lines([
"*created sdist package at*",
])
def test_minversion(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[tox]
minversion = 6.0
'''
})
result = cmd.run("tox", "-v")
result.stdout.fnmatch_lines([
"*ERROR*tox version is * required is at least 6.0*"
])
assert result.ret
def test_run_custom_install_command_error(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tox.ini': '''
[testenv]
install_command=./tox.ini {opts} {packages}
'''
})
result = cmd.run("tox")
result.stdout.fnmatch_lines([
"ERROR: invocation failed (errno *), args: ['*/tox.ini*",
])
assert result.ret
def test_unknown_interpreter_and_env(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
'''
})
result = cmd.run("tox")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*InterpreterNotFound*xyz_unknown_interpreter*",
])
result = cmd.run("tox", "-exyz")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*unknown*",
])
def test_unknown_interpreter(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
'''
})
result = cmd.run("tox")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*InterpreterNotFound*xyz_unknown_interpreter*",
])
def test_skip_unknown_interpreter(cmd, initproj):
initproj("interp123-0.5", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
'''
})
result = cmd.run("tox", "--skip-missing-interpreters")
assert not result.ret
result.stdout.fnmatch_lines([
"*SKIPPED*InterpreterNotFound*xyz_unknown_interpreter*",
])
def test_unknown_dep(cmd, initproj):
initproj("dep123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'tox.ini': '''
[testenv]
deps=qweqwe123
changedir=tests
'''
})
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*could not install*qweqwe123*",
])
def test_unknown_environment(cmd, initproj):
initproj("env123-0.7", filedefs={
'tox.ini': ''
})
result = cmd.run("tox", "-e", "qpwoei")
assert result.ret
result.stdout.fnmatch_lines([
"*ERROR*unknown*environment*qpwoei*",
])
def test_skip_sdist(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
syntax error
"""
,
'tox.ini': '''
[tox]
skipsdist=True
[testenv]
commands=python -c "print('done')"
'''
})
result = cmd.run("tox", )
assert result.ret == 0
def test_minimal_setup_py_empty(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
"""
,
'tox.ini': ''
})
result = cmd.run("tox", )
assert result.ret == 1
result.stdout.fnmatch_lines([
"*ERROR*empty*",
])
def test_minimal_setup_py_comment_only(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """\n# some comment
"""
,
'tox.ini': ''
})
result = cmd.run("tox", )
assert result.ret == 1
result.stdout.fnmatch_lines([
"*ERROR*empty*",
])
def test_minimal_setup_py_non_functional(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
import sys
"""
,
'tox.ini': ''
})
result = cmd.run("tox", )
assert result.ret == 1
result.stdout.fnmatch_lines([
"*ERROR*check setup.py*",
])
def test_sdist_fails(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
syntax error
"""
,
'tox.ini': '',
})
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*FAIL*could not package project*",
])
def test_package_install_fails(cmd, initproj):
initproj("pkg123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
'setup.py': """
from setuptools import setup
setup(
name='pkg123',
description='pkg123 project',
version='0.7',
license='MIT',
platforms=['unix', 'win32'],
packages=['pkg123',],
install_requires=['qweqwe123'],
)
"""
,
'tox.ini': '',
})
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*InvocationError*",
])
class TestToxRun:
@pytest.fixture
def example123(self, initproj):
initproj("example123-0.5", filedefs={
'tests': {'test_hello.py': """
def test_hello(pytestconfig):
pass
""",
},
'tox.ini': '''
[testenv]
changedir=tests
commands= py.test --basetemp={envtmpdir} \
--junitxml=junit-{envname}.xml
deps=pytest
'''
})
def test_toxuone_env(self, cmd, example123):
result = cmd.run("tox")
assert not result.ret
result.stdout.fnmatch_lines([
"*junit-python.xml*",
"*1 passed*",
])
result = cmd.run("tox", "-epython", )
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
def test_different_config_cwd(self, cmd, example123, monkeypatch):
# see that things work with a different CWD
monkeypatch.chdir(cmd.tmpdir)
result = cmd.run("tox", "-c", "example123/tox.ini")
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
def test_json(self, cmd, example123):
# see that tests can also fail and retcode is correct
testfile = py.path.local("tests").join("test_hello.py")
assert testfile.check()
testfile.write("def test_fail(): assert 0")
jsonpath = cmd.tmpdir.join("res.json")
result = cmd.run("tox", "--result-json", jsonpath)
assert result.ret == 1
data = json.load(jsonpath.open("r"))
verify_json_report_format(data)
result.stdout.fnmatch_lines([
"*1 failed*",
"*summary*",
"*python: *failed*",
])
def test_develop(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
"""})
result = cmd.run("tox", "-vv", "--develop")
assert not result.ret
assert "sdist-make" not in result.stdout.str()
def test_usedevelop(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
[testenv]
usedevelop=True
"""})
result = cmd.run("tox", "-vv")
assert not result.ret
assert "sdist-make" not in result.stdout.str()
def test_usedevelop_mixed(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
[testenv:devenv]
usedevelop=True
[testenv:nondev]
usedevelop=False
"""})
# running only 'devenv' should not do sdist
result = cmd.run("tox", "-vv", "-e", "devenv")
assert not result.ret
assert "sdist-make" not in result.stdout.str()
# running all envs should do sdist
result = cmd.run("tox", "-vv")
assert not result.ret
assert "sdist-make" in result.stdout.str()
def test_test_usedevelop(cmd, initproj):
initproj("example123-0.5", filedefs={
'tests': {'test_hello.py': """
def test_hello(pytestconfig):
pass
""",
},
'tox.ini': '''
[testenv]
usedevelop=True
changedir=tests
commands=
py.test --basetemp={envtmpdir} --junitxml=junit-{envname}.xml []
deps=pytest
'''
})
result = cmd.run("tox", "-v")
assert not result.ret
result.stdout.fnmatch_lines([
"*junit-python.xml*",
"*1 passed*",
])
assert "sdist-make" not in result.stdout.str()
result = cmd.run("tox", "-epython", )
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
# see that things work with a different CWD
old = cmd.tmpdir.chdir()
result = cmd.run("tox", "-c", "example123/tox.ini")
assert not result.ret
result.stdout.fnmatch_lines([
"*1 passed*",
"*summary*",
"*python: commands succeeded"
])
old.chdir()
# see that tests can also fail and retcode is correct
testfile = py.path.local("tests").join("test_hello.py")
assert testfile.check()
testfile.write("def test_fail(): assert 0")
result = cmd.run("tox", )
assert result.ret
result.stdout.fnmatch_lines([
"*1 failed*",
"*summary*",
"*python: *failed*",
])
def test_test_piphelp(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
# content of: tox.ini
[testenv]
commands=pip -h
[testenv:py26]
basepython=python
[testenv:py27]
basepython=python
"""})
result = cmd.run("tox")
assert not result.ret
def test_notest(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
# content of: tox.ini
[testenv:py26]
basepython=python
"""})
result = cmd.run("tox", "-v", "--notest")
assert not result.ret
result.stdout.fnmatch_lines([
"*summary*",
"*py26*skipped tests*",
])
result = cmd.run("tox", "-v", "--notest", "-epy26")
assert not result.ret
result.stdout.fnmatch_lines([
"*py26*reusing*",
])
def test_PYC(initproj, cmd, monkeypatch):
initproj("example123", filedefs={'tox.ini': ''})
monkeypatch.setenv("PYTHONDOWNWRITEBYTECODE", 1)
result = cmd.run("tox", "-v", "--notest")
assert not result.ret
result.stdout.fnmatch_lines([
"*create*",
])
def test_env_VIRTUALENV_PYTHON(initproj, cmd, monkeypatch):
initproj("example123", filedefs={'tox.ini': ''})
monkeypatch.setenv("VIRTUALENV_PYTHON", '/FOO')
result = cmd.run("tox", "-v", "--notest")
assert not result.ret, result.stdout.lines
result.stdout.fnmatch_lines([
"*create*",
])
def test_sdistonly(initproj, cmd):
initproj("example123", filedefs={'tox.ini': """
"""})
result = cmd.run("tox", "-v", "--sdistonly")
assert not result.ret
result.stdout.fnmatch_lines([
"*sdist-make*setup.py*",
])
assert "-mvirtualenv" not in result.stdout.str()
def test_separate_sdist_no_sdistfile(cmd, initproj):
distshare = cmd.tmpdir.join("distshare")
initproj("pkg123-0.7", filedefs={
'tox.ini': """
[tox]
distshare=%s
""" % distshare
})
result = cmd.run("tox", "--sdistonly")
assert not result.ret
l = distshare.listdir()
assert len(l) == 1
sdistfile = l[0]
assert 'pkg123-0.7.zip' in str(sdistfile)
def test_separate_sdist(cmd, initproj):
distshare = cmd.tmpdir.join("distshare")
initproj("pkg123-0.7", filedefs={
'tox.ini': """
[tox]
distshare=%s
sdistsrc={distshare}/pkg123-0.7.zip
""" % distshare
})
result = cmd.run("tox", "--sdistonly")
assert not result.ret
l = distshare.listdir()
assert len(l) == 1
sdistfile = l[0]
result = cmd.run("tox", "-v", "--notest")
assert not result.ret
result.stdout.fnmatch_lines([
"*inst*%s*" % sdistfile,
])
def test_sdist_latest(tmpdir, newconfig):
distshare = tmpdir.join("distshare")
config = newconfig([], """
[tox]
distshare=%s
sdistsrc={distshare}/pkg123-*
""" % distshare)
p = distshare.ensure("pkg123-1.4.5.zip")
distshare.ensure("pkg123-1.4.5a1.zip")
session = Session(config)
sdist_path = session.sdist()
assert sdist_path == p
def test_installpkg(tmpdir, newconfig):
p = tmpdir.ensure("pkg123-1.0.zip")
config = newconfig(["--installpkg=%s" % p], "")
session = Session(config)
sdist_path = session.sdist()
assert sdist_path == p
@pytest.mark.xfail("sys.platform == 'win32' and sys.version_info < (2,6)",
reason="test needs better impl")
def test_envsitepackagesdir(cmd, initproj):
initproj("pkg512-0.0.5", filedefs={
'tox.ini': """
[testenv]
commands=
python -c "print(r'X:{envsitepackagesdir}')"
"""})
result = cmd.run("tox")
assert result.ret == 0
result.stdout.fnmatch_lines("""
X:*tox*site-packages*
""")
def verify_json_report_format(data, testenvs=True):
assert data["reportversion"] == "1"
assert data["toxversion"] == tox.__version__
if testenvs:
for envname, envdata in data["testenvs"].items():
for commandtype in ("setup", "test"):
if commandtype not in envdata:
continue
for command in envdata[commandtype]:
assert command["output"]
assert command["retcode"]
pyinfo = envdata["python"]
assert isinstance(pyinfo["version_info"], list)
assert pyinfo["version"]
assert pyinfo["executable"]
| mit | -8,756,750,694,607,493,000 | 28.661314 | 80 | 0.552466 | false |
baseclue/django-rest-test | tests/test_compare.py | 1 | 15578 | import unittest
from rest_test import compare
class DictTestCase(unittest.TestCase):
def test_basic(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b='2',
a=1
)
assert compare(data, expected_data)
def test_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b=2,
a=1
)
self.assertFalse(compare(data, expected_data))
def test_deep(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=dict(
a='test'
),
c=''
)
)
assert compare(data, expected_data)
def test_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=dict(
b=1
),
c=''
)
)
self.assertFalse(compare(data, expected_data))
class ItemEllipsisTestCase(unittest.TestCase):
def test_basic(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b='2',
a=...
)
assert compare(data, expected_data)
def test_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b=2,
a=...
)
self.assertFalse(compare(data, expected_data))
def test_deep(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=...,
c=''
)
)
assert compare(data, expected_data)
def test_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=3,
b=...,
c=''
)
)
self.assertFalse(compare(data, expected_data))
def test_missing_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
a=...
)
self.assertFalse(compare(data, expected_data))
def test_moreover_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b=2,
a=...,
c='test'
)
self.assertFalse(compare(data, expected_data))
def test_missing_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=...,
)
)
self.assertFalse(compare(data, expected_data))
def test_moreover_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=3,
b=...,
c='',
d='test'
)
)
self.assertFalse(compare(data, expected_data))
class DictEllipsisTestCase(unittest.TestCase):
def test_empty(self):
data = dict(
)
expected_data = {
...: ...
}
assert compare(data, expected_data)
def test_basic(self):
data = dict(
a=1,
b='2'
)
expected_data = {
...: ...
}
assert compare(data, expected_data)
def test_basic_more(self):
data = {
'a': 1,
'b': '2',
'c': 3
}
expected_data = {
...: ...,
'b': '2'
}
assert compare(data, expected_data)
def test_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = {
'b': 2,
...: ...
}
self.assertFalse(compare(data, expected_data))
def test_deep(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 2,
...: ...,
'c': ''
}
)
assert compare(data, expected_data)
def test_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 3,
...: ...,
'c': ''
}
)
self.assertFalse(compare(data, expected_data))
def test_moreover_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = {
'b': 2,
...: ...,
'c': 'test'
}
self.assertFalse(compare(data, expected_data))
def test_missing_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 2,
...: ...
}
)
assert compare(data, expected_data)
def test_moreover_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 3,
...: ...,
'c': '',
'd': 'test'
}
)
self.assertFalse(compare(data, expected_data))
def test_bad_usage(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = {
'a': 1,
...: dict(
b=dict(
a='test'
),
a=2,
c=''
)
}
with self.assertRaises(TypeError):
compare(data, expected_data)
class ListTestCase(unittest.TestCase):
def test_basic(self):
data = [
1,
'2'
]
expected_data = [
1,
'2'
]
assert compare(data, expected_data)
def test_basic_false(self):
data = [
1,
2
]
expected_data = [
2,
1
]
self.assertFalse(compare(data, expected_data))
def test_combination(self):
data = [
dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
),
dict(
a=2,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
]
expected_data = [
dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
),
dict(
a=2,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
]
assert compare(data, expected_data)
class ListEllipsisTestCase(unittest.TestCase):
def test_empty(self):
data = [
'1',
{},
3
]
expected_data = [
...
]
assert compare(data, expected_data)
def test_start(self):
data = [
'1',
{},
3
]
expected_data = [
...,
3
]
assert compare(data, expected_data)
def test_multiple(self):
data = [
'1',
2,
3,
'4',
5
]
expected_data = [
...,
2,
...
]
assert compare(data, expected_data)
def test_end(self):
data = [
1,
2,
3,
4,
5
]
expected_data = [
1,
...
]
assert compare(data, expected_data)
def test_multiple_in(self):
data = [
1,
2,
3,
4,
5,
6,
7
]
expected_data = [
...,
2,
...,
5,
...
]
assert compare(data, expected_data)
def test_start_false(self):
data = [
1,
2,
3
]
expected_data = [
...,
4
]
self.assertFalse(compare(data, expected_data))
def test_multiple_false(self):
data = [
1,
2,
3,
4,
5
]
expected_data = [
...,
6,
...
]
self.assertFalse(compare(data, expected_data))
def test_end_false(self):
data = [
1,
2,
3,
4,
5
]
expected_data = [
2,
...
]
self.assertFalse(compare(data, expected_data))
def test_multiple_in_optional(self):
data = [
1,
2,
3,
4,
5,
6,
7
]
expected_data = [
...,
2,
...,
3,
...
]
assert compare(data, expected_data)
def test_multiple_in_optional_between(self):
data = [
2,
3,
]
expected_data = [
...,
2,
...,
3,
...
]
assert compare(data, expected_data)
def test_bad_usage(self):
data = [
1,
2,
3,
4,
5,
6,
7
]
expected_data = [
...,
...,
7
]
with self.assertRaises(TypeError):
compare(data, expected_data)
def test_one(self):
data = [1]
expected_data = [..., 1, ...]
assert compare(data, expected_data)
class CombinationEllipsisTestCase(unittest.TestCase):
def test_combination(self):
data = [
{
'foo': 1,
'bar': 2,
'zoo': 3,
}
]
expected_data = [
...,
{
...: ...,
'bar': 2
},
...
]
assert compare(data, expected_data)
def test_combination_empty(self):
data = [
{
}
]
expected_data = [
...,
{
...: ...,
},
...
]
assert compare(data, expected_data)
class TypeTestCase(unittest.TestCase):
def test_list(self):
data = [
'1',
{},
3
]
expected_data = list
assert compare(data, expected_data)
def test_dict(self):
data = {
'1': 2,
2: 3,
3: 2
}
expected_data = dict
assert compare(data, expected_data)
def test_list_with_dict(self):
data = [
'1',
{'test': 'test_value'},
3
]
expected_data = [
'1',
dict,
3
]
assert compare(data, expected_data)
def test_dict_with_list(self):
data = {
'1': 2,
'test_key': [1, 2, 'u'],
3: 2
}
expected_data = {
'1': 2,
'test_key': list,
3: 2
}
assert compare(data, expected_data)
def test_different_types_in_list(self):
data = [
'1',
{},
3
]
expected_data = [
str,
dict,
int
]
assert compare(data, expected_data)
def test_different_types_in_dict(self):
data = {
'1': 2,
2: 'test',
3: [1, 2, 3]
}
expected_data = {
'1': int,
2: str,
3: list
}
assert compare(data, expected_data)
def test_different_types_in_dict_in_deep(self):
data = [
'1',
{
'1': 2,
2: 'test',
3: [1, 2, 3]
},
3
]
expected_data = [
'1',
{
'1': int,
2: str,
3: list
},
3
]
assert compare(data, expected_data)
class CombinationTypeEllipsisTestCase(unittest.TestCase):
def test_combination(self):
data = [
{
'foo': 1,
'bar': 2,
'zoo': 3,
},
{
'test_foo': '1',
'test_bar': 2,
'test_zoo': [1, 2, 3],
},
]
expected_data = [
...,
{
...: ...,
'bar': int
},
...,
{
'test_foo': str,
'test_bar': 2,
'test_zoo': list,
}
]
assert compare(data, expected_data)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,426,249,069,602,658,000 | 18.399751 | 57 | 0.29991 | false |
apeyrard/sjtu-work | DIP/exercises/ex3/ex3.py | 1 | 3652 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
from PIL import Image
import numpy as np
import math
import argparse
def getMatrix(image):
data = list(image.getdata())
width, height = image.size
matrix = np.array(data).reshape(height,width)
return matrix
def getData(matrix):
data = list(matrix.reshape(matrix.shape[0]*matrix.shape[1]))
return data
def preprocessing(matrix):
newMat = matrix.copy()
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
newMat[x][y] = newMat[x][y]*(-1)**(x+y)
return newMat
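# Added note: multiplying f(x, y) by (-1)**(x + y) before the forward FFT shifts
# the spectrum so that F(0, 0) lands at the array centre, i.e.
# f(x, y) * (-1)**(x + y)  <->  F(u - M/2, v - N/2); postprocessing() undoes it
# after the inverse FFT. np.fft.fftshift/ifftshift would be an equivalent choice.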
def postprocessing(matrix):
return preprocessing(matrix)
def ideal(matrix, cutoff, function):
newMat = matrix.copy()
center = (math.floor(newMat.shape[0]/2), math.floor(newMat.shape[1]/2))
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
dist = math.sqrt((x-center[0])**2+(y-center[1])**2)
if function == 'low':
if dist > cutoff:
newMat[x][y] = 0+0j
if function == 'high':
if dist < cutoff:
newMat[x][y] = 0+0j
return newMat
def butter(matrix, order, cutoff, function):
if order is None:
print("Order must be specified for butterworth filter")
sys.exit(1)
newMat = matrix.copy()
center = (math.floor(newMat.shape[0]/2), math.floor(newMat.shape[1]/2))
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
dist = math.sqrt((x-center[0])**2+(y-center[1])**2)
if function == 'low':
newMat[x][y] = newMat[x][y] * (1/(1+(dist/cutoff)**(2*order)))
if function == 'high':
newMat[x][y] = newMat[x][y] * (1-(1/(1+(dist/cutoff)**(2*order))))
return newMat
def gauss(matrix, cutoff, function):
newMat = matrix.copy()
center = (math.floor(newMat.shape[0]/2), math.floor(newMat.shape[1]/2))
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
dist = math.sqrt((x-center[0])**2+(y-center[1])**2)
if function == 'low':
newMat[x][y] = newMat[x][y] * (math.exp(-(dist**2)/(2*(cutoff**2))))
if function == 'high':
newMat[x][y] = newMat[x][y] * (1- (math.exp(-(dist**2)/(2*(cutoff**2)))))
return newMat
parser = argparse.ArgumentParser(description='Filtering in frequency domain')
parser.add_argument('--ideal', action='store_true')
parser.add_argument('--butterworth', action='store_true')
parser.add_argument('--gaussian', action='store_true')
parser.add_argument('--highpass', action='store_true')
parser.add_argument('--lowpass', action='store_true')
parser.add_argument('cutoff', type=float)
parser.add_argument('--order', type=float)
parser.add_argument('image')
args = parser.parse_args()
try:
with Image.open(args.image) as im:
if args.lowpass:
filtering = 'low'
else:
filtering = 'high'
imNew = Image.new(im.mode, im.size)
matrix = getMatrix(im)
prepMat = preprocessing(matrix)
fourierMat = np.fft.fft2(prepMat)
if args.ideal:
imageF = ideal(fourierMat, args.cutoff, filtering)
elif args.butterworth:
imageF = butter(fourierMat, args.order, args.cutoff, filtering)
else:
imageF = gauss(fourierMat, args.cutoff, filtering)
newImage = np.fft.ifft2(imageF)
postNew = postprocessing(newImage)
        # keep only the real part; the filtered inverse FFT carries tiny imaginary residue
        imNew.putdata(getData(postNew.real))
imNew.show()
except FileNotFoundError:
    sys.exit("Error: file not found")
| mit | 3,300,094,426,064,420,000 | 31.035088 | 89 | 0.589266 | false |
luk156/minimo | minimo/documento/migrations/0004_auto__add_unitamisura__add_field_riga_unita.py | 1 | 10331 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UnitaMisura'
db.create_table(u'documento_unitamisura', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nome', self.gf('django.db.models.fields.CharField')(default='Numero', max_length=30)),
('sigla', self.gf('django.db.models.fields.CharField')(default='N', max_length=4)),
('stato', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'documento', ['UnitaMisura'])
# Adding field 'Riga.unita'
db.add_column(u'documento_riga', 'unita',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['documento.UnitaMisura'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'UnitaMisura'
db.delete_table(u'documento_unitamisura')
# Deleting field 'Riga.unita'
db.delete_column(u'documento_riga', 'unita_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'documento.documento': {
'Meta': {'ordering': "['data']", 'object_name': 'Documento'},
'bollo': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'cap': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'citta': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'cod_fiscale': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.DateField', [], {}),
'data_consegna': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'descrizione_ritenuta': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importo_residuo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'numero': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'p_iva': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'pagamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['documento.Pagamento']", 'null': 'True', 'blank': 'True'}),
'provincia': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ragione_sociale': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'riferimento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['documento.Documento']", 'null': 'True', 'blank': 'True'}),
'ritenuta': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'sconto': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'stato': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documento_template'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['template.TemplateDocumento']"}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'valore_bollo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'via': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'})
},
u'documento.pagamento': {
'Meta': {'object_name': 'Pagamento'},
'giorni': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'iban': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intestazione': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'istituto': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'stato': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'documento.riga': {
'Meta': {'object_name': 'Riga'},
'codice': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '70', 'null': 'True', 'blank': 'True'}),
'descrizione': ('django.db.models.fields.TextField', [], {}),
'descrizione_imposta': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '70', 'null': 'True', 'blank': 'True'}),
'documento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['documento.Documento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importo_unitario': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'imposta': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'quantita': ('django.db.models.fields.FloatField', [], {}),
'unita': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['documento.UnitaMisura']", 'null': 'True', 'blank': 'True'})
},
u'documento.unitamisura': {
'Meta': {'object_name': 'UnitaMisura'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'default': "'Numero'", 'max_length': '30'}),
'sigla': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '4'}),
'stato': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'template.templatedocumento': {
'Meta': {'object_name': 'TemplateDocumento'},
'descrizione': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '70'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['documento'] | gpl-2.0 | 823,570,124,755,032,000 | 74.416058 | 209 | 0.55106 | false |
fzimmermann89/pyload | module/plugins/hoster/FastixRu.py | 1 | 1366 | # -*- coding: utf-8 -*-
import re
import urllib
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
from module.plugins.internal.utils import json
class FastixRu(MultiHoster):
__name__ = "FastixRu"
__type__ = "hoster"
__version__ = "0.17"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?fastix\.(ru|it)/file/\w{24}'
__config__ = [("activated", "bool", "Activated", True),
("use_premium" , "bool", "Use premium account if available" , True),
("revertfailed", "bool", "Revert to standard download if fails", True)]
__description__ = """Fastix multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Massimo Rosamilia", "[email protected]")]
def setup(self):
self.chunk_limit = 3
def handle_premium(self, pyfile):
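        # Ask the Fastix API for a direct download link using the account's API key.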
self.html = self.load("http://fastix.ru/api_v2/",
get={'apikey': self.account.get_data('apikey'),
'sub' : "getdirectlink",
'link' : pyfile.url})
data = json.loads(self.html)
self.log_debug("Json data", data)
        if data.get("error"):
self.offline()
else:
self.link = data['downloadlink']
getInfo = create_getInfo(FastixRu)
| gpl-3.0 | 3,599,834,605,401,120,300 | 29.355556 | 90 | 0.533675 | false |
Purg/kwiver | vital/bindings/python/vital/types/landmark_map.py | 1 | 4829 | """
ckwg +31
Copyright 2016 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
vital::landmark_map interface
"""
import ctypes
from vital.types import Landmark
from vital.util import VitalObject, free_void_ptr
class LandmarkMap (VitalObject):
@classmethod
def from_dict(cls, id_lm_d):
"""
Create a new instance of LandmarkMap using the given dictionary mapping
integer IDs to Landmark instances.
:param id_lm_d: dictionary mapping integer IDs to Landmark instances
:type id_lm_d: dict[int|long, vital.types.Landmark]
:return: New landmark map instance containing a copy of the input map.
:rtype: LandmarkMap
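
        Example (hypothetical Landmark instances):
            lm_map = LandmarkMap.from_dict({0: landmark_a, 1: landmark_b})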
"""
s = len(id_lm_d)
t_lm_ids = (ctypes.c_int64 * s)
t_lm_landmarks = (Landmark.c_ptr_type() * s)
lm_ids = t_lm_ids()
lm_landmarks = t_lm_landmarks()
i = 0
for k, l in id_lm_d.iteritems():
lm_ids[i] = k
lm_landmarks[i] = l.c_pointer
i += 1
lm_cptr = cls._call_cfunc(
'vital_landmark_map_new',
[t_lm_landmarks, t_lm_ids, ctypes.c_size_t],
[lm_landmarks, lm_ids, s],
cls.c_ptr_type()
)
return cls(lm_cptr)
def __init__(self, from_cptr=None):
"""
        Create an empty map, or initialize from an existing C instance pointer.
:param from_cptr: Optional existing landmark map C pointer
"""
super(LandmarkMap, self).__init__(from_cptr)
def _new(self):
return self._call_cfunc(
'vital_landmark_map_new_empty',
restype=self.C_TYPE_PTR
)
def _destroy(self):
self._call_cfunc(
'vital_landmark_map_destroy', [self.C_TYPE_PTR], [self]
)
def __eq__(self, other):
return (
isinstance(other, LandmarkMap) and
self.as_dict() == other.as_dict()
)
def __ne__(self, other):
return not (self == other)
def __len__(self):
return self.size
@property
def size(self):
"""
Get the size of this map
:return: the size of this map
:rtype: int
"""
return self._call_cfunc(
'vital_landmark_map_size',
[self.C_TYPE_PTR], [self],
ctypes.c_size_t
)
def as_dict(self):
"""
Get a copy of this map as a python dictionary
:return: Dictionary mapping landmark IDs to Landmark instances
:rtype: dict[int|long, vital.types.Landmark]
"""
t_lm_ids = ctypes.POINTER(ctypes.c_int64)
t_lm_landmarks = ctypes.POINTER(Landmark.c_ptr_type())
lm_ids = t_lm_ids()
lm_landmarks = t_lm_landmarks()
self._call_cfunc(
'vital_landmark_map_landmarks',
[self.C_TYPE_PTR, ctypes.POINTER(t_lm_ids), ctypes.POINTER(t_lm_landmarks)],
[self, ctypes.byref(lm_ids), ctypes.byref(lm_landmarks)]
)
d = {}
s = self.size
for i in xrange(s):
# Need to copy ctypes pointer object
l_cptr = Landmark.c_ptr_type()(lm_landmarks[i].contents)
d[lm_ids[i]] = Landmark(from_cptr=l_cptr)
free_void_ptr(lm_ids)
free_void_ptr(lm_landmarks)
return d
| bsd-3-clause | -1,191,061,869,346,105,300 | 30.562092 | 88 | 0.61607 | false |
tectronics/openmalaria-git | util/compareOutput.py | 1 | 7178 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of OpenMalaria.
#
# Copyright (C) 2005-2010 Swiss Tropical Institute and Liverpool School Of Tropical Medicine
#
# OpenMalaria is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import math
from optparse import OptionParser
from approxEqual import ApproxSame
from readOutput import readEntries
REL_PRECISION=1e-6
ABS_PRECISION=1e-6
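# Return True if the two files are byte-for-byte identical (compared in chunks).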
def charEqual (fn1,fn2):
MAX=10*1024
f1 = open(fn1,'r')
f2 = open(fn2,'r')
while True:
s1 = f1.read(MAX)
s2 = f2.read(MAX)
if (len(s1)==0) or (len(s2)==0):
# end of one or both files; equal if it's the end of both
return len(s1) == len(s2)
if s1 != s2:
return False
def main(fn1,fn2,maxDiffsToPrint=6):
"""Takes names of the two files to compare and optionally an argument describing
the maximum number of differences to print directly (note: order is not intuitive).
Returns a tuple ret,ident; ret is 0 if test passes (output considered near-enough equal),
ident is 1 if files are binary-equal."""
ret=0
opt=""
if REL_PRECISION!=1e-6:
        opt+=" --rel-precision="+str(REL_PRECISION)
if ABS_PRECISION!=1e-6:
        opt+=" --abs-precision="+str(ABS_PRECISION)
print "\033[1;34m compareOutput.py"+opt+" "+fn1+" "+fn2+" "+str(maxDiffsToPrint)+"\033[0;0m"
# Read both files and combine into a map of key to pairs (v1, v2)
try:
if charEqual (fn1,fn2):
print "output.txt files are identical"
return 0,True
print "output.txt files aren't binary-equal"
values1=readEntries(fn1)
values2=readEntries(fn2)
# python 3000 syntax is "except IOError as e", backported to 2.6 but not always supported. Old syntax:
except IOError, e:
print str(e)
return 1,False
values=dict()
for (k,v1) in values1.iteritems():
v2=None
if (k in values2):
v2=values2[k]
del values2[k]
values[k] = (v1,v2)
for (k,v2) in values2.iteritems():
values[k] = (None,v2)
# Go through all values:
numPrinted=0
numDiffs=0
numMissing1=0
numMissing2=0
perMeasureNum = dict()
perMeasureTotal1 = dict()
perMeasureTotal2 = dict()
perMeasureNumDiff = dict()
perMeasureDiffSum = dict()
perMeasureDiffAbsSum = dict()
approxSame = ApproxSame(REL_PRECISION, ABS_PRECISION)
for (k,(v1,v2)) in values.iteritems():
if v1==None:
numMissing1 += 1
elif v2==None:
numMissing2 += 1
else:
perMeasureNum[k.a] = perMeasureNum.get(k.a, 0) + 1
perMeasureTotal1[k.a] = perMeasureTotal1.get(k.a, 0.0) + v1
perMeasureTotal2[k.a] = perMeasureTotal2.get(k.a, 0.0) + v2
# Compare with relative precision
if approxSame (v1, v2):
continue
numDiffs += 1
# Sum up total difference per measure
perMeasureDiffSum[k.a] = perMeasureDiffSum.get(k.a,0.0) + v2 - v1
perMeasureDiffAbsSum[k.a] = perMeasureDiffAbsSum.get(k.a,0.0) + math.fabs(v2-v1)
numPrinted += 1
perMeasureNumDiff[k.a] = perMeasureNumDiff.get(k.a,0) + 1;
if (numPrinted <= maxDiffsToPrint):
print "survey "+str(k.b)+", group "+str(k.c)+", measure "+str(k.a)+": "+str(v1)+" -> "+str(v2)
if (numPrinted == maxDiffsToPrint):
print "[won't print any more line-by-line diffs]"
if (numMissing1 > 0) or (numMissing2 > 0):
print str(numMissing1) + " entries missing from first file, " + str(numMissing2) +" from second"
ret = 3
maxDiffSum=0.0
maxAbsDiffSum=0.0
for (k.a,absDiff) in perMeasureDiffAbsSum.iteritems():
if not (absDiff <= 1e-6): # handle NANs
# standard division throws on divide-by-zero, which I don't want
def div(x,y):
try:
return x/y
except ZeroDivisionError:
return 1e400 * 0 # nan
diff=perMeasureDiffSum[k.a]
sum1=perMeasureTotal1[k.a]
sum2=perMeasureTotal2[k.a]
diffSum=div(diff,sum1)
maxDiffSum=max(maxDiffSum,math.fabs(diffSum))
absDiffSum=div(absDiff,sum1)
maxAbsDiffSum=max(maxAbsDiffSum,absDiffSum)
print "for measure "+str(k.a)+":\tsum(1st file):"+str(sum1)+"\tsum(2nd file):"+str(sum2)+"\tdiff/sum: "+str(diffSum)+"\t(abs diff)/sum: "+str(absDiffSum)
if maxDiffSum>0 or maxAbsDiffSum>0:
print "Max diff/sum:",maxDiffSum,"max (abs diff)/sum:",maxAbsDiffSum
if numDiffs == 0:
print "No significant differences (total relative diff: "+str(approxSame.getTotalRelDiff())+"), ok."
return ret,False
else:
print "\033[1;31m"+str(numDiffs)+" significant differences (total relative diff: "+str(approxSame.getTotalRelDiff())+ ")!\033[0;0m"
return 1,False
# Test for options
def evalOptions (args):
parser = OptionParser(usage="Usage: %prog [options] logfile1 logfile2 [max different lines to print]",
# damn reformatting into a single paragraph: this doesn't get printed very nicely when --help is invoked
description="""Compare logfile1 and logfile2 for differences, returning a measure of difference.
See http://code.google.com/p/openmalaria/wiki/UtilsRunScripts#compareOutput.py for details on output.""")
parser.add_option("-R","--rel-precision",
action="store", dest="rel_precision", type="float",
help="Set relative precision (default: 1.0e-6)")
parser.add_option("-A","--abs-precision",
action="store", dest="abs_precision", type="float",
help="Set absolute precision (default: 1.0e-6)")
(options, others) = parser.parse_args(args=args)
return options,others
if __name__ == '__main__':
(options,others) = evalOptions (sys.argv[1:])
if options.rel_precision:
REL_PRECISION=options.rel_precision
if options.abs_precision:
ABS_PRECISION=options.abs_precision
if (len(others) == 3):
ret,ident = main (others[0],others[1],int(others[2]))
elif (len(others) == 2):
ret,ident = main (others[0],others[1])
else:
print "Usage: "+sys.argv[0]+" logfile1 logfile2 [max different lines to print]"
ret=-1
sys.exit(ret)
| gpl-2.0 | -189,319,169,589,652,500 | 38.224044 | 165 | 0.617721 | false |
linyc74/CaMNIST | view.py | 1 | 12377 | import numpy as np
import cv2, time, sys, threading, json, os
from PyQt4 import QtCore, QtGui
from controller import *
class CamnistGUI(QtGui.QMainWindow):
def __init__(self, controller_obj):
super(CamnistGUI, self).__init__()
self.controller = controller_obj
pkg_dir = os.path.dirname(__file__)
path = os.path.join(pkg_dir, 'parameters/gui.json')
gui_parms = json.loads(open(path, 'r').read())
w = gui_parms['monitor_width']
h = gui_parms['monitor_height']
self.setWindowTitle('CaMNIST')
self.setWindowIcon(QtGui.QIcon('icons/cool.png'))
self.setGeometry(100, 100, w, h)
self.setFixedSize(w, h)
self.setMouseTracking(True)
self.monitor = QtGui.QLabel(self)
self.monitor.setGeometry(0, 0, w, h)
self.monitor.setAlignment(QtCore.Qt.AlignCenter)
self.toolbar = QtGui.QToolBar('Tool Bar')
self.toolbar.setMovable(True)
self.toolbar.setStyleSheet("QToolBar { background:white; }")
self.toolbar.setIconSize(QtCore.QSize(30, 45))
self.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolbar)
self.info_window = TextWindow()
self.camera_tuner_window = CameraTunerWindow( controller_obj = self.controller )
self.__init__toolbtns()
def __init__toolbtns(self):
# Each action has a unique key and a name
# key = icon filename = method name
# name = text of the action/button
# ( keys , names )
K = [('snapshot' , 'Snapshot' ),
('toggle_recording' , 'Record Video' ),
('open_info' , 'Show Real-time Info' ),
('open_camera_tuner', 'Adjust Camera Parameters' )]
self.actions = {}
self.toolbtns = {}
# Create actions and tool buttons
for key, name in K:
pkg_dir = os.path.dirname(__file__)
path = os.path.join(pkg_dir, 'icons/' + key + '.png')
icon = QtGui.QIcon(path)
self.actions[key] = QtGui.QAction(icon, name, self)
self.toolbtns[key] = self.toolbar.addAction(self.actions[key])
# For actions that needs to be connected to the core object,
K = ['snapshot', 'toggle_recording']
# In this loop I defined a standard way of
# connecting each action to a method in the core object via the controller object.
for key in K:
# Get a argument-less method from the controller object.
# Note that the method_name = key.
method = self.controller.get_method( method_name = key )
# The get_method() returns None
# if a particular method is not found in the core object.
if not method is None:
# Connect the action to the method in the controller object
self.actions[key].triggered.connect(method)
# For actions that needs to be connected to the self gui object,
keys = ['open_info', 'open_camera_tuner']
for key in keys:
try:
method = getattr(self, key)
self.actions[key].triggered.connect(method)
except Exception as exception_inst:
print(exception_inst)
def open_info(self):
if not self.info_window.isVisible():
self.info_window.show()
def open_camera_tuner(self):
self.camera_tuner_window.show()
def wheelEvent(self, event):
if event.delta() > 0:
self.controller.call_method('zoom_in')
else:
self.controller.call_method('zoom_out')
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self,
'CaMNIST',
'Are you sure you want to quit CaMNIST?',
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.controller.call_method('close')
self.info_window.close()
self.camera_tuner_window.close()
event.accept()
else:
event.ignore()
# Methods for incoming signals
def connect_signals(self, thread, signal_name):
'Called by an external object to connect signals.'
# The suffix '(PyQt_PyObject)' means the argument to be transferred
# could be any type of python objects,
# not limited to Qt objects.
signal = signal_name + '(PyQt_PyObject)'
# The method name to be called upon signal arrival = the signal name
try:
method = getattr(self, signal_name)
self.connect(thread, QtCore.SIGNAL(signal), method)
except Exception as exception_inst:
print("Try to connect PyQt signal '{}'".format(signal_name))
print(exception_inst + '\n')
def progress_update(self, text_value):
self.progress_bar.progress_update(text_value)
def display_image(self, image):
# convert from BGR to RGB for latter QImage
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, bytesPerComponent = image.shape
bytesPerLine = bytesPerComponent * width
# convert cv2 image to QImage
Q_img = QtGui.QImage(image,
width, height, bytesPerLine,
QtGui.QImage.Format_RGB888)
# Convert QImage to QPixmap
Q_pixmap = QtGui.QPixmap.fromImage(Q_img)
# Set the QLabel to display the QPixmap
self.monitor.setPixmap(Q_pixmap)
def recording_starts(self):
self.actions['toggle_recording'].setIcon(QtGui.QIcon('icons/stop_recording.png'))
self.actions['toggle_recording'].setText('Stop')
def recording_ends(self):
self.actions['toggle_recording'].setIcon(QtGui.QIcon('icons/toggle_recording.png'))
self.actions['toggle_recording'].setText('Record Video')
def set_info_text(self, text):
self.info_window.setText(text)
def display_topography(self, vertices):
self.gl_window.gl_widget.updateObject(vertices)
class SliderWidget(QtGui.QWidget):
'''
This widget wraps a single parameter in the TunerWindow.
Name, value, min, max, interval are stored in this object.
Three gui elements are included to display the information of the parameter:
1) QLabel showing name
2) QLabel showing value
3) QSlider
'''
def __init__(self, parent, name, min, max, value, interval):
super(SliderWidget, self).__init__(parent)
self.name = name
self.min = min
self.max = max
self.value = value
self.interval = interval
self.hbox = QtGui.QHBoxLayout()
self.QLabel_name = QtGui.QLabel(self)
self.QLabel_value = QtGui.QLabel(self)
self.QSlider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.setLayout(self.hbox)
self.hbox.addWidget(self.QLabel_name)
self.hbox.addWidget(self.QLabel_value)
self.hbox.addWidget(self.QSlider)
self.QLabel_name.setText(name)
self.QLabel_value.setText(str(value))
self.QSlider.setMinimum(min)
self.QSlider.setMaximum(max)
self.QSlider.setValue(value)
self.QSlider.setSingleStep(interval)
self.QSlider.setTickInterval(interval)
self.QSlider.setTickPosition(QtGui.QSlider.TicksBelow)
self.QSlider.valueChanged.connect(self.setValue)
def setValue(self, value):
# Round the value to fit the interval
value = value - self.min
value = round( value / float(self.interval) ) * self.interval
value = int( value + self.min )
self.value = value
self.QSlider.setValue(value)
self.QLabel_value.setText(str(value))
class TextWindow(QtGui.QWidget):
def __init__(self):
super(TextWindow, self).__init__()
self.setWindowTitle('Info')
self.setWindowIcon(QtGui.QIcon('icons/cool.png'))
self.setGeometry(150, 150, 512, 256)
self.setFixedSize(512, 256)
self.font = QtGui.QFont()
self.font.setFamily('Segoe UI')
self.font.setBold(False)
self.font.setPixelSize(14)
self.textbox = QtGui.QLabel(self)
self.textbox.setGeometry(0, 0, 512, 256)
self.textbox.setAlignment(QtCore.Qt.AlignLeft)
self.textbox.setFont(self.font)
def setText(self, text):
self.textbox.setText(text)
class TunerWindow(QtGui.QWidget):
'''
A gui template window for tuning parameters.
This class does not contain any business logic.
All it does is to provide an interface to adjust parameters through gui.
Each parameter is wrapped in a 'block' of SliderWidget object.
Properties (name, min, max, value, interval)
of each parameter is stored in the SliderWidget object.
'''
def __init__(self):
super(TunerWindow, self).__init__()
# self.setMinimumWidth(600)
# self.setMaximumWidth(600)
self.main_vbox = QtGui.QVBoxLayout()
self.setLayout(self.main_vbox)
self.btn_hbox = QtGui.QHBoxLayout()
self.main_vbox.addLayout(self.btn_hbox)
K = [('ok' ,'OK' ),
('cancel','Cancel'),
('apply' ,'Apply' )]
self.btn = {}
for key, name in K:
self.btn[key] = QtGui.QPushButton(name, self)
self.btn[key].clicked.connect(getattr(self, key))
self.btn_hbox.addWidget( self.btn[key] )
self.parameters = []
def apply_parameter(self):
'''
Supposed to be overridden.
Defines what to do when ok() or apply() are called.
'''
pass
def ok(self):
self.apply_parameter()
self.hide()
def cancel(self):
self.hide()
def apply(self):
self.apply_parameter()
def add_parameter(self, name, min, max, value, interval):
'''
Add a new SliderWidget object holding all information of the new parameter.
'''
widget = SliderWidget(parent = self,
name = name,
min = min,
max = max,
value = value,
interval = interval)
self.parameters.append(widget)
self.main_vbox.insertWidget(len(self.main_vbox)-1, widget)
class CameraTunerWindow(TunerWindow):
'''
Inherits from the TunerWindow class.
    The business logic for the camera imaging parameters
    is specified in this class.
This class also manages the transfer of camera parameters
to the core object.
'''
def __init__(self, controller_obj):
super(CameraTunerWindow, self).__init__()
self.controller = controller_obj
self.setWindowIcon(QtGui.QIcon('icons/cool.png'))
        self.setWindowTitle('Camera Parameters')
self.setMinimumWidth(600)
self.add_parameter(name='brightness' , min=0 , max=255 , value=150 , interval=5 )
self.add_parameter(name='contrast' , min=0 , max=255 , value=64 , interval=5 )
self.add_parameter(name='saturation' , min=0 , max=255 , value=80 , interval=5 )
self.add_parameter(name='gain' , min=0 , max=255 , value=50 , interval=5 )
self.add_parameter(name='exposure' , min=-7 , max=-1 , value=-4 , interval=1 )
self.add_parameter(name='white_balance' , min=3000, max=6500, value=5000, interval=100)
self.add_parameter(name='focus' , min=0 , max=255 , value=0 , interval=5 )
def apply_parameter(self):
'''
Transfers parameters to the core object via the controller.
'''
parms = {}
for p in self.parameters:
parms[p.name] = p.value
self.controller.call_method( method_name = 'apply_camera_parameters',
arg = parms )
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
gui = CamnistGUI( controller_obj = MockController() )
gui.show()
sys.exit(app.exec_())
| mit | 6,695,720,950,841,457,000 | 32.271505 | 95 | 0.586329 | false |
google-research/google-research | poem/core/keypoint_profiles.py | 1 | 48752 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keypoint profile class and utility functions."""
import abc
import enum
import six
import tensorflow as tf
from poem.core import keypoint_utils
class LeftRightType(enum.Enum):
"""Keypoint/segment left/right type."""
UNKNOWN = 0
CENTRAL = 1
LEFT = 2
RIGHT = 3
def infer_keypoint_left_right_type(left_right_types, indices):
"""Infers keypoint left/right type.
The inferred left/right type is decided as follows:
1. If either type is UNKNOWN, returns UNKNOWN.
2. If both types are the same, returns this type.
3. If one type is CENTRAL, and the other type is LEFT or RIGHT, returns the
other type.
4. If one type is LEFT and the other type is RIGHT, returns CENTRAL.
Args:
left_right_types: A list of LeftRightType enum values for all keypoints.
indices: A list of integers for keypoint indices.
Returns:
A LeftRightType enum value for inferred type.
Raises:
ValueError: If any index is out of range.
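
  Example:
    With left_right_types = [LEFT, RIGHT, CENTRAL], indices [0, 2] infer to
    LEFT (rule 3), while indices [0, 1] infer to CENTRAL (rule 4).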
"""
if not indices:
return LeftRightType.UNKNOWN
def lookup(i):
if i < 0 or i >= len(left_right_types):
raise ValueError('Left/right type index is out of range: %d.' % i)
return left_right_types[i]
if len(indices) == 1:
return lookup(indices[0])
output_type = LeftRightType.CENTRAL
for i in indices:
current_type = lookup(i)
if current_type == LeftRightType.UNKNOWN:
return LeftRightType.UNKNOWN
if output_type == LeftRightType.CENTRAL:
output_type = current_type
elif current_type != LeftRightType.CENTRAL and current_type != output_type:
output_type = LeftRightType.CENTRAL
return output_type
def infer_segment_left_right_type(left_right_types, start_indices, end_indices):
"""Infers segment left/right type.
The inferred left/right type is decided as follows:
1. If either type is UNKNOWN, returns UNKNOWN.
2. If both types are the same, returns this type.
3. If one type is CENTRAL, and the other type is LEFT or RIGHT, returns the
other type.
4. If one type is LEFT and the other type is RIGHT, returns CENTRAL.
Args:
left_right_types: A list of LeftRightType enum values for all keypoints.
start_indices: A list of integers for LHS keypoint indices.
end_indices: A list of integers for RHS keypoint indices.
Returns:
A LeftRightType enum value for inferred type.
"""
lhs_type = infer_keypoint_left_right_type(left_right_types, start_indices)
rhs_type = infer_keypoint_left_right_type(left_right_types, end_indices)
if lhs_type == LeftRightType.UNKNOWN or rhs_type == LeftRightType.UNKNOWN:
return LeftRightType.UNKNOWN
if lhs_type == LeftRightType.CENTRAL:
return rhs_type
if rhs_type == LeftRightType.CENTRAL:
return lhs_type
return lhs_type if lhs_type == rhs_type else LeftRightType.CENTRAL
class KeypointProfile(six.with_metaclass(abc.ABCMeta, object)):
"""Keypoint profile base class."""
def __init__(self,
name,
keypoint_names,
offset_keypoint_names,
scale_keypoint_name_pairs,
scale_distance_reduction_fn,
scale_unit,
segment_name_pairs,
head_keypoint_name=None,
neck_keypoint_name=None,
left_shoulder_keypoint_name=None,
right_shoulder_keypoint_name=None,
left_elbow_keypoint_name=None,
right_elbow_keypoint_name=None,
left_wrist_keypoint_name=None,
right_wrist_keypoint_name=None,
spine_keypoint_name=None,
pelvis_keypoint_name=None,
left_hip_keypoint_name=None,
right_hip_keypoint_name=None,
left_knee_keypoint_name=None,
right_knee_keypoint_name=None,
left_ankle_keypoint_name=None,
right_ankle_keypoint_name=None):
"""Initializer."""
self._name = name
self._keypoint_names = [name for name, _ in keypoint_names]
self._keypoint_left_right_types = [
left_right_type for _, left_right_type in keypoint_names
]
self._offset_keypoint_index = [
self._keypoint_names.index(keypoint_name)
for keypoint_name in offset_keypoint_names
]
self._scale_keypoint_index_pairs = []
for start_names, end_names in scale_keypoint_name_pairs:
self._scale_keypoint_index_pairs.append(
([self._keypoint_names.index(name) for name in start_names],
[self._keypoint_names.index(name) for name in end_names]))
self._scale_distance_reduction_fn = scale_distance_reduction_fn
self._scale_unit = scale_unit
self._segment_index_pairs = []
for start_names, end_names in segment_name_pairs:
self._segment_index_pairs.append(
([self._keypoint_names.index(name) for name in start_names],
[self._keypoint_names.index(name) for name in end_names]))
self._head_keypoint_name = head_keypoint_name
self._neck_keypoint_name = neck_keypoint_name
self._left_shoulder_keypoint_name = left_shoulder_keypoint_name
self._right_shoulder_keypoint_name = right_shoulder_keypoint_name
self._left_elbow_keypoint_name = left_elbow_keypoint_name
self._right_elbow_keypoint_name = right_elbow_keypoint_name
self._left_wrist_keypoint_name = left_wrist_keypoint_name
self._right_wrist_keypoint_name = right_wrist_keypoint_name
self._spine_keypoint_name = spine_keypoint_name
self._pelvis_keypoint_name = pelvis_keypoint_name
self._left_hip_keypoint_name = left_hip_keypoint_name
self._right_hip_keypoint_name = right_hip_keypoint_name
self._left_knee_keypoint_name = left_knee_keypoint_name
self._right_knee_keypoint_name = right_knee_keypoint_name
self._left_ankle_keypoint_name = left_ankle_keypoint_name
self._right_ankle_keypoint_name = right_ankle_keypoint_name
@property
def name(self):
"""Gets keypoint profile name."""
return self._name
@property
def keypoint_names(self):
"""Gets keypoint names."""
return self._keypoint_names
@property
@abc.abstractmethod
def keypoint_dim(self):
"""Gets keypoint dimensionality."""
raise NotImplementedError
@property
def keypoint_num(self):
"""Gets number of keypoints."""
return len(self._keypoint_names)
def keypoint_left_right_type(self, keypoint_index):
"""Gets keypoint left/right type given index."""
if isinstance(keypoint_index, int):
keypoint_index = [keypoint_index]
return infer_keypoint_left_right_type(self._keypoint_left_right_types,
keypoint_index)
def segment_left_right_type(self, start_index, end_index):
"""Gets segment left/right type given index."""
if isinstance(start_index, int):
start_index = [start_index]
if isinstance(end_index, int):
end_index = [end_index]
return infer_segment_left_right_type(self._keypoint_left_right_types,
start_index, end_index)
@property
def offset_keypoint_index(self):
"""Gets offset keypoint index."""
return self._offset_keypoint_index
@property
def scale_keypoint_index_pairs(self):
"""Gets scale keypoint index pairs."""
return self._scale_keypoint_index_pairs
@property
def scale_unit(self):
"""Gets scale unit."""
return self._scale_unit
@property
def segment_index_pairs(self):
"""Gets segment index pairs."""
return self._segment_index_pairs
@property
def keypoint_affinity_matrix(self):
"""Gets keypoint affinity matrix.
    If a segment has a multi-point end, all pairs of relevant points are
    considered to be in affinity.
Returns:
matrix: A double list of floats for the keypoint affinity matrix.
Raises:
ValueError: If affinity matrix has any isolated node.
"""
matrix = [[0.0
for _ in range(self.keypoint_num)]
for _ in range(self.keypoint_num)]
# Self-affinity.
for i in range(self.keypoint_num):
matrix[i][i] = 1.0
for lhs_index, rhs_index in self._segment_index_pairs:
for i in lhs_index:
for j in lhs_index:
matrix[i][j] = 1.0
matrix[j][i] = 1.0
for i in rhs_index:
for j in rhs_index:
matrix[i][j] = 1.0
matrix[j][i] = 1.0
for i in lhs_index:
for j in rhs_index:
matrix[i][j] = 1.0
matrix[j][i] = 1.0
# Check if the affinity matrix is valid, i.e., each node must have degree
# greater than 1 (no isolated node).
for row in matrix:
if sum(row) <= 1.0:
raise ValueError(
'Affinity matrix has a node with degree less than 2: %s.' %
str(matrix))
return matrix
def keypoint_index(self, keypoint_name, raise_error_if_not_found=False):
"""Gets keypoint index given name.
If `raise_error_if_not_found` is True, raises ValueError if keypoint does
not exist. Otherwise, returns -1 if keypoint does not exist.
Args:
keypoint_name: A string for keypoint name to find index of.
raise_error_if_not_found: A boolean for whether to raise ValueError if
keypoint does not exist.
Returns:
An integer for keypoint index.
Raises:
ValueError: If keypoint does not exist and `raise_error_if_not_found` is
True.
"""
if keypoint_name in self._keypoint_names:
return self._keypoint_names.index(keypoint_name)
if raise_error_if_not_found:
raise ValueError('Failed to find keypoint: `%s`.' % str(keypoint_name))
return -1
@property
def head_keypoint_index(self):
"""Gets head keypoint index."""
if not self._head_keypoint_name:
raise ValueError('Head keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._head_keypoint_name
]
@property
def neck_keypoint_index(self):
"""Gets neck keypoint index."""
if not self._neck_keypoint_name:
raise ValueError('Neck keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._neck_keypoint_name
]
@property
def left_shoulder_keypoint_index(self):
"""Gets left shoulder keypoint index."""
if not self._left_shoulder_keypoint_name:
raise ValueError('Left shoulder keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._left_shoulder_keypoint_name
]
@property
def right_shoulder_keypoint_index(self):
"""Gets right shoulder keypoint index."""
if not self._right_shoulder_keypoint_name:
raise ValueError('Right shoulder keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._right_shoulder_keypoint_name
]
@property
def left_elbow_keypoint_index(self):
"""Gets left elbow keypoint index."""
if not self._left_elbow_keypoint_name:
raise ValueError('Left elbow keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._left_elbow_keypoint_name
]
@property
def right_elbow_keypoint_index(self):
"""Gets right elbow keypoint index."""
if not self._right_elbow_keypoint_name:
raise ValueError('Right elbow keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._right_elbow_keypoint_name
]
@property
def left_wrist_keypoint_index(self):
"""Gets left wrist keypoint index."""
if not self._left_wrist_keypoint_name:
raise ValueError('Left wrist keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._left_wrist_keypoint_name
]
@property
def right_wrist_keypoint_index(self):
"""Gets right wrist keypoint index."""
if not self._right_wrist_keypoint_name:
raise ValueError('Right wrist keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._right_wrist_keypoint_name
]
@property
def spine_keypoint_index(self):
"""Gets spine keypoint index."""
if not self._spine_keypoint_name:
raise ValueError('Spine keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._spine_keypoint_name
]
@property
def pelvis_keypoint_index(self):
"""Gets pelvis keypoint index."""
if not self._pelvis_keypoint_name:
raise ValueError('Pelvis keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._pelvis_keypoint_name
]
@property
def left_hip_keypoint_index(self):
"""Gets left hip keypoint index."""
if not self._left_hip_keypoint_name:
raise ValueError('Left hip keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._left_hip_keypoint_name
]
@property
def right_hip_keypoint_index(self):
"""Gets right hip keypoint index."""
if not self._right_hip_keypoint_name:
raise ValueError('Right hip keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._right_hip_keypoint_name
]
@property
def left_knee_keypoint_index(self):
"""Gets left knee keypoint index."""
if not self._left_knee_keypoint_name:
raise ValueError('Left knee keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._left_knee_keypoint_name
]
@property
def right_knee_keypoint_index(self):
"""Gets right knee keypoint index."""
if not self._right_knee_keypoint_name:
raise ValueError('Right knee keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._right_knee_keypoint_name
]
@property
def left_ankle_keypoint_index(self):
"""Gets left ankle keypoint index."""
if not self._left_ankle_keypoint_name:
raise ValueError('Left ankle keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._left_ankle_keypoint_name
]
@property
def right_ankle_keypoint_index(self):
"""Gets right ankle keypoint index."""
if not self._right_ankle_keypoint_name:
raise ValueError('Right ankle keypoint is not specified.')
return [
self.keypoint_index(name, raise_error_if_not_found=True)
for name in self._right_ankle_keypoint_name
]
@property
def standard_part_names(self):
"""Gets all standard part names."""
return [
'HEAD', 'NECK', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'SPINE', 'PELVIS',
'LEFT_HIP', 'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
'RIGHT_ANKLE'
]
def get_standard_part_index(self, part_name):
"""Gets part index by standardized name."""
if part_name.upper() == 'HEAD':
return self.head_keypoint_index
if part_name.upper() == 'NECK':
return self.neck_keypoint_index
if part_name.upper() == 'LEFT_SHOULDER':
return self.left_shoulder_keypoint_index
if part_name.upper() == 'RIGHT_SHOULDER':
return self.right_shoulder_keypoint_index
if part_name.upper() == 'LEFT_ELBOW':
return self.left_elbow_keypoint_index
if part_name.upper() == 'RIGHT_ELBOW':
return self.right_elbow_keypoint_index
if part_name.upper() == 'LEFT_WRIST':
return self.left_wrist_keypoint_index
if part_name.upper() == 'RIGHT_WRIST':
return self.right_wrist_keypoint_index
if part_name.upper() == 'SPINE':
return self.spine_keypoint_index
if part_name.upper() == 'PELVIS':
return self.pelvis_keypoint_index
if part_name.upper() == 'LEFT_HIP':
return self.left_hip_keypoint_index
if part_name.upper() == 'RIGHT_HIP':
return self.right_hip_keypoint_index
if part_name.upper() == 'LEFT_KNEE':
return self.left_knee_keypoint_index
if part_name.upper() == 'RIGHT_KNEE':
return self.right_knee_keypoint_index
if part_name.upper() == 'LEFT_ANKLE':
return self.left_ankle_keypoint_index
if part_name.upper() == 'RIGHT_ANKLE':
return self.right_ankle_keypoint_index
raise ValueError('Unsupported part name: `%s`.' % part_name)
def normalize(self, keypoints, keypoint_masks=None):
"""Normalizes keypoints."""
del keypoint_masks
return keypoint_utils.normalize_points(
keypoints,
offset_point_indices=self._offset_keypoint_index,
scale_distance_point_index_pairs=self._scale_keypoint_index_pairs,
scale_distance_reduction_fn=self._scale_distance_reduction_fn,
scale_unit=self._scale_unit)
def denormalize(self,
normalized_keypoints,
offset_points,
scale_distances,
keypoint_masks=None):
"""Denormalizes keypoints."""
del keypoint_masks
return (normalized_keypoints / self._scale_unit * scale_distances +
offset_points)
class KeypointProfile3D(KeypointProfile):
"""3D keypoint profile base class."""
def __init__(self,
name,
keypoint_names,
offset_keypoint_names,
scale_keypoint_name_pairs,
segment_name_pairs,
scale_distance_reduction_fn=tf.math.reduce_sum,
scale_unit=1.0,
head_keypoint_name=None,
neck_keypoint_name=None,
left_shoulder_keypoint_name=None,
right_shoulder_keypoint_name=None,
left_elbow_keypoint_name=None,
right_elbow_keypoint_name=None,
left_wrist_keypoint_name=None,
right_wrist_keypoint_name=None,
spine_keypoint_name=None,
pelvis_keypoint_name=None,
left_hip_keypoint_name=None,
right_hip_keypoint_name=None,
left_knee_keypoint_name=None,
right_knee_keypoint_name=None,
left_ankle_keypoint_name=None,
right_ankle_keypoint_name=None):
"""Initializer."""
super(KeypointProfile3D, self).__init__(
name=name,
keypoint_names=keypoint_names,
offset_keypoint_names=offset_keypoint_names,
scale_keypoint_name_pairs=scale_keypoint_name_pairs,
scale_distance_reduction_fn=scale_distance_reduction_fn,
scale_unit=scale_unit,
segment_name_pairs=segment_name_pairs,
head_keypoint_name=head_keypoint_name,
neck_keypoint_name=neck_keypoint_name,
left_shoulder_keypoint_name=left_shoulder_keypoint_name,
right_shoulder_keypoint_name=right_shoulder_keypoint_name,
left_elbow_keypoint_name=left_elbow_keypoint_name,
right_elbow_keypoint_name=right_elbow_keypoint_name,
left_wrist_keypoint_name=left_wrist_keypoint_name,
right_wrist_keypoint_name=right_wrist_keypoint_name,
spine_keypoint_name=spine_keypoint_name,
pelvis_keypoint_name=pelvis_keypoint_name,
left_hip_keypoint_name=left_hip_keypoint_name,
right_hip_keypoint_name=right_hip_keypoint_name,
left_knee_keypoint_name=left_knee_keypoint_name,
right_knee_keypoint_name=right_knee_keypoint_name,
left_ankle_keypoint_name=left_ankle_keypoint_name,
right_ankle_keypoint_name=right_ankle_keypoint_name)
@property
def keypoint_dim(self):
"""Gets keypoint dimensionality."""
return 3
class KeypointProfile2D(KeypointProfile):
"""2D keypoint profile base class."""
def __init__(self,
name,
keypoint_names,
offset_keypoint_names,
scale_keypoint_name_pairs,
segment_name_pairs,
compatible_keypoint_name_dict=None,
scale_distance_reduction_fn=tf.math.reduce_max,
scale_unit=0.5,
head_keypoint_name=None,
neck_keypoint_name=None,
left_shoulder_keypoint_name=None,
right_shoulder_keypoint_name=None,
left_elbow_keypoint_name=None,
right_elbow_keypoint_name=None,
left_wrist_keypoint_name=None,
right_wrist_keypoint_name=None,
spine_keypoint_name=None,
pelvis_keypoint_name=None,
left_hip_keypoint_name=None,
right_hip_keypoint_name=None,
left_knee_keypoint_name=None,
right_knee_keypoint_name=None,
left_ankle_keypoint_name=None,
right_ankle_keypoint_name=None):
"""Initializer."""
super(KeypointProfile2D, self).__init__(
name=name,
keypoint_names=keypoint_names,
offset_keypoint_names=offset_keypoint_names,
scale_keypoint_name_pairs=scale_keypoint_name_pairs,
scale_distance_reduction_fn=scale_distance_reduction_fn,
scale_unit=scale_unit,
segment_name_pairs=segment_name_pairs,
head_keypoint_name=head_keypoint_name,
neck_keypoint_name=neck_keypoint_name,
left_shoulder_keypoint_name=left_shoulder_keypoint_name,
right_shoulder_keypoint_name=right_shoulder_keypoint_name,
left_elbow_keypoint_name=left_elbow_keypoint_name,
right_elbow_keypoint_name=right_elbow_keypoint_name,
left_wrist_keypoint_name=left_wrist_keypoint_name,
right_wrist_keypoint_name=right_wrist_keypoint_name,
spine_keypoint_name=spine_keypoint_name,
pelvis_keypoint_name=pelvis_keypoint_name,
left_hip_keypoint_name=left_hip_keypoint_name,
right_hip_keypoint_name=right_hip_keypoint_name,
left_knee_keypoint_name=left_knee_keypoint_name,
right_knee_keypoint_name=right_knee_keypoint_name,
left_ankle_keypoint_name=left_ankle_keypoint_name,
right_ankle_keypoint_name=right_ankle_keypoint_name)
self._compatible_keypoint_name_dict = {}
if compatible_keypoint_name_dict is not None:
for _, compatible_keypoint_names in compatible_keypoint_name_dict.items():
if len(compatible_keypoint_names) != len(self._keypoint_names):
raise ValueError('Compatible keypoint names must be of the same size '
'as keypoint names.')
self._compatible_keypoint_name_dict = compatible_keypoint_name_dict
@property
def keypoint_dim(self):
"""Gets keypoint dimensionality."""
return 2
@property
def compatible_keypoint_name_dict(self):
"""Gets compatible keypoint name dictionary."""
return self._compatible_keypoint_name_dict
class Std16KeypointProfile3D(KeypointProfile3D):
"""Standard 3D 16-keypoint profile."""
def __init__(self):
"""Initializer."""
super(Std16KeypointProfile3D,
self).__init__(
name='3DSTD16',
keypoint_names=[('HEAD', LeftRightType.CENTRAL),
('NECK', LeftRightType.CENTRAL),
('LEFT_SHOULDER', LeftRightType.LEFT),
('RIGHT_SHOULDER', LeftRightType.RIGHT),
('LEFT_ELBOW', LeftRightType.LEFT),
('RIGHT_ELBOW', LeftRightType.RIGHT),
('LEFT_WRIST', LeftRightType.LEFT),
('RIGHT_WRIST', LeftRightType.RIGHT),
('SPINE', LeftRightType.CENTRAL),
('PELVIS', LeftRightType.CENTRAL),
('LEFT_HIP', LeftRightType.LEFT),
('RIGHT_HIP', LeftRightType.RIGHT),
('LEFT_KNEE', LeftRightType.LEFT),
('RIGHT_KNEE', LeftRightType.RIGHT),
('LEFT_ANKLE', LeftRightType.LEFT),
('RIGHT_ANKLE', LeftRightType.RIGHT)],
offset_keypoint_names=['PELVIS'],
scale_keypoint_name_pairs=[(['NECK'], ['SPINE']),
(['SPINE'], ['PELVIS'])],
segment_name_pairs=[(['HEAD'], ['NECK']),
(['NECK'], ['LEFT_SHOULDER']),
(['NECK'], ['RIGHT_SHOULDER']),
(['NECK'], ['SPINE']),
(['LEFT_SHOULDER'], ['LEFT_ELBOW']),
(['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
(['LEFT_ELBOW'], ['LEFT_WRIST']),
(['RIGHT_ELBOW'], ['RIGHT_WRIST']),
(['SPINE'], ['PELVIS']),
(['PELVIS'], ['LEFT_HIP']),
(['PELVIS'], ['RIGHT_HIP']),
(['LEFT_HIP'], ['LEFT_KNEE']),
(['RIGHT_HIP'], ['RIGHT_KNEE']),
(['LEFT_KNEE'], ['LEFT_ANKLE']),
(['RIGHT_KNEE'], ['RIGHT_ANKLE'])],
head_keypoint_name=['HEAD'],
neck_keypoint_name=['NECK'],
left_shoulder_keypoint_name=['LEFT_SHOULDER'],
right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
left_elbow_keypoint_name=['LEFT_ELBOW'],
right_elbow_keypoint_name=['RIGHT_ELBOW'],
left_wrist_keypoint_name=['LEFT_WRIST'],
right_wrist_keypoint_name=['RIGHT_WRIST'],
spine_keypoint_name=['SPINE'],
pelvis_keypoint_name=['PELVIS'],
left_hip_keypoint_name=['LEFT_HIP'],
right_hip_keypoint_name=['RIGHT_HIP'],
left_knee_keypoint_name=['LEFT_KNEE'],
right_knee_keypoint_name=['RIGHT_KNEE'],
left_ankle_keypoint_name=['LEFT_ANKLE'],
right_ankle_keypoint_name=['RIGHT_ANKLE'])
class Std13KeypointProfile3D(KeypointProfile3D):
"""Standard 3D 13-keypoint profile."""
def __init__(self):
"""Initializer."""
super(Std13KeypointProfile3D, self).__init__(
name='3DSTD13',
keypoint_names=[('HEAD', LeftRightType.CENTRAL),
('LEFT_SHOULDER', LeftRightType.LEFT),
('RIGHT_SHOULDER', LeftRightType.RIGHT),
('LEFT_ELBOW', LeftRightType.LEFT),
('RIGHT_ELBOW', LeftRightType.RIGHT),
('LEFT_WRIST', LeftRightType.LEFT),
('RIGHT_WRIST', LeftRightType.RIGHT),
('LEFT_HIP', LeftRightType.LEFT),
('RIGHT_HIP', LeftRightType.RIGHT),
('LEFT_KNEE', LeftRightType.LEFT),
('RIGHT_KNEE', LeftRightType.RIGHT),
('LEFT_ANKLE', LeftRightType.LEFT),
('RIGHT_ANKLE', LeftRightType.RIGHT)],
offset_keypoint_names=['LEFT_HIP', 'RIGHT_HIP'],
scale_keypoint_name_pairs=[(['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
['LEFT_HIP', 'RIGHT_HIP'])],
segment_name_pairs=[
(['HEAD'], ['LEFT_SHOULDER', 'RIGHT_SHOULDER']),
(['LEFT_SHOULDER', 'RIGHT_SHOULDER'], ['LEFT_SHOULDER']),
(['LEFT_SHOULDER', 'RIGHT_SHOULDER'], ['RIGHT_SHOULDER']),
(['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
['LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP']),
(['LEFT_SHOULDER'], ['LEFT_ELBOW']),
(['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
(['LEFT_ELBOW'], ['LEFT_WRIST']),
(['RIGHT_ELBOW'], ['RIGHT_WRIST']),
(['LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP',
'RIGHT_HIP'], ['LEFT_HIP', 'RIGHT_HIP']),
(['LEFT_HIP', 'RIGHT_HIP'], ['LEFT_HIP']),
(['LEFT_HIP', 'RIGHT_HIP'], ['RIGHT_HIP']),
(['LEFT_HIP'], ['LEFT_KNEE']), (['RIGHT_HIP'], ['RIGHT_KNEE']),
(['LEFT_KNEE'], ['LEFT_ANKLE']), (['RIGHT_KNEE'], ['RIGHT_ANKLE'])
],
head_keypoint_name=['HEAD'],
neck_keypoint_name=['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
left_shoulder_keypoint_name=['LEFT_SHOULDER'],
right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
left_elbow_keypoint_name=['LEFT_ELBOW'],
right_elbow_keypoint_name=['RIGHT_ELBOW'],
left_wrist_keypoint_name=['LEFT_WRIST'],
right_wrist_keypoint_name=['RIGHT_WRIST'],
spine_keypoint_name=[
'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP'
],
pelvis_keypoint_name=['LEFT_HIP', 'RIGHT_HIP'],
left_hip_keypoint_name=['LEFT_HIP'],
right_hip_keypoint_name=['RIGHT_HIP'],
left_knee_keypoint_name=['LEFT_KNEE'],
right_knee_keypoint_name=['RIGHT_KNEE'],
left_ankle_keypoint_name=['LEFT_ANKLE'],
right_ankle_keypoint_name=['RIGHT_ANKLE'])
class LegacyH36m17KeypointProfile3D(KeypointProfile3D):
"""Legacy Human3.6M 3D 17-keypoint profile."""
def __init__(self):
"""Initializer."""
super(LegacyH36m17KeypointProfile3D, self).__init__(
name='LEGACY_3DH36M17',
keypoint_names=[('Hip', LeftRightType.CENTRAL),
('Head', LeftRightType.CENTRAL),
('Neck/Nose', LeftRightType.CENTRAL),
('Thorax', LeftRightType.CENTRAL),
('LShoulder', LeftRightType.LEFT),
('RShoulder', LeftRightType.RIGHT),
('LElbow', LeftRightType.LEFT),
('RElbow', LeftRightType.RIGHT),
('LWrist', LeftRightType.LEFT),
('RWrist', LeftRightType.RIGHT),
('Spine', LeftRightType.CENTRAL),
('LHip', LeftRightType.LEFT),
('RHip', LeftRightType.RIGHT),
('LKnee', LeftRightType.LEFT),
('RKnee', LeftRightType.RIGHT),
('LFoot', LeftRightType.LEFT),
('RFoot', LeftRightType.RIGHT)],
offset_keypoint_names=['Hip'],
scale_keypoint_name_pairs=[(['Hip'], ['Spine']),
(['Spine'], ['Thorax'])],
segment_name_pairs=[(['Hip'], ['Spine']), (['Hip'], ['LHip']),
(['Hip'], ['RHip']), (['Spine'], ['Thorax']),
(['LHip'], ['LKnee']), (['RHip'], ['RKnee']),
(['LKnee'], ['LFoot']), (['RKnee'], ['RFoot']),
(['Thorax'], ['Neck/Nose']),
(['Thorax'], ['LShoulder']),
(['Thorax'], ['RShoulder']),
(['Neck/Nose'], ['Head']),
(['LShoulder'], ['LElbow']),
(['RShoulder'], ['RElbow']),
(['LElbow'], ['LWrist']), (['RElbow'], ['RWrist'])],
head_keypoint_name=['Head'],
neck_keypoint_name=['Thorax'],
left_shoulder_keypoint_name=['LShoulder'],
right_shoulder_keypoint_name=['RShoulder'],
left_elbow_keypoint_name=['LElbow'],
right_elbow_keypoint_name=['RElbow'],
left_wrist_keypoint_name=['LWrist'],
right_wrist_keypoint_name=['RWrist'],
spine_keypoint_name=['Spine'],
pelvis_keypoint_name=['Hip'],
left_hip_keypoint_name=['LHip'],
right_hip_keypoint_name=['RHip'],
left_knee_keypoint_name=['LKnee'],
right_knee_keypoint_name=['RKnee'],
left_ankle_keypoint_name=['LFoot'],
right_ankle_keypoint_name=['RFoot'])
class LegacyH36m13KeypointProfile3D(KeypointProfile3D):
"""Legacy Human3.6M 3D 13-keypoint profile."""
def __init__(self):
"""Initializer."""
super(LegacyH36m13KeypointProfile3D, self).__init__(
name='LEGACY_3DH36M13',
keypoint_names=[('Head', LeftRightType.CENTRAL),
('LShoulder', LeftRightType.LEFT),
('RShoulder', LeftRightType.RIGHT),
('LElbow', LeftRightType.LEFT),
('RElbow', LeftRightType.RIGHT),
('LWrist', LeftRightType.LEFT),
('RWrist', LeftRightType.RIGHT),
('LHip', LeftRightType.LEFT),
('RHip', LeftRightType.RIGHT),
('LKnee', LeftRightType.LEFT),
('RKnee', LeftRightType.RIGHT),
('LFoot', LeftRightType.LEFT),
('RFoot', LeftRightType.RIGHT)],
offset_keypoint_names=['LHip'],
scale_keypoint_name_pairs=[
(['LHip', 'RHip'], ['LShoulder', 'RShoulder']),
],
segment_name_pairs=[(['LHip', 'RHip'], ['LShoulder', 'RShoulder']),
(['LHip', 'RHip'], ['LHip']),
(['LHip', 'RHip'], ['RHip']), (['LHip'], ['LKnee']),
(['RHip'], ['RKnee']), (['LKnee'], ['LFoot']),
(['RKnee'], ['RFoot']),
(['LShoulder', 'RShoulder'], ['Head']),
(['LShoulder', 'RShoulder'], ['LShoulder']),
(['LShoulder', 'RShoulder'], ['RShoulder']),
(['LShoulder'], ['LElbow']),
(['RShoulder'], ['RElbow']),
(['LElbow'], ['LWrist']), (['RElbow'], ['RWrist'])],
head_keypoint_name=['Head'],
neck_keypoint_name=['LShoulder', 'RShoulder'],
left_shoulder_keypoint_name=['LShoulder'],
right_shoulder_keypoint_name=['RShoulder'],
left_elbow_keypoint_name=['LElbow'],
right_elbow_keypoint_name=['RElbow'],
left_wrist_keypoint_name=['LWrist'],
right_wrist_keypoint_name=['RWrist'],
spine_keypoint_name=['LShoulder', 'RShoulder', 'LHip', 'RHip'],
pelvis_keypoint_name=['LHip', 'RHip'],
left_hip_keypoint_name=['LHip'],
right_hip_keypoint_name=['RHip'],
left_knee_keypoint_name=['LKnee'],
right_knee_keypoint_name=['RKnee'],
left_ankle_keypoint_name=['LFoot'],
right_ankle_keypoint_name=['RFoot'])
class LegacyMpii3dhp17KeypointProfile3D(KeypointProfile3D):
"""Legacy MPII-3DHP 3D 17-keypoint profile."""
def __init__(self):
"""Initializer."""
super(LegacyMpii3dhp17KeypointProfile3D, self).__init__(
name='LEGACY_3DMPII3DHP17',
keypoint_names=[('pelvis', LeftRightType.CENTRAL),
('head', LeftRightType.CENTRAL),
('neck', LeftRightType.CENTRAL),
('head_top', LeftRightType.CENTRAL),
('left_shoulder', LeftRightType.LEFT),
('right_shoulder', LeftRightType.RIGHT),
('left_elbow', LeftRightType.LEFT),
('right_elbow', LeftRightType.RIGHT),
('left_wrist', LeftRightType.LEFT),
('right_wrist', LeftRightType.RIGHT),
('spine', LeftRightType.CENTRAL),
('left_hip', LeftRightType.LEFT),
('right_hip', LeftRightType.RIGHT),
('left_knee', LeftRightType.LEFT),
('right_knee', LeftRightType.RIGHT),
('left_ankle', LeftRightType.LEFT),
('right_ankle', LeftRightType.RIGHT)],
offset_keypoint_names=['pelvis'],
scale_keypoint_name_pairs=[(['pelvis'], ['spine']),
(['spine'], ['neck'])],
segment_name_pairs=[(['pelvis'], ['spine']), (['pelvis'], ['left_hip']),
(['pelvis'], ['right_hip']), (['spine'], ['neck']),
(['left_hip'], ['left_knee']),
(['right_hip'], ['right_knee']),
(['left_knee'], ['left_ankle']),
(['right_knee'], ['right_ankle']),
(['neck'], ['head']), (['neck'], ['left_shoulder']),
(['neck'], ['right_shoulder']),
(['head'], ['head_top']),
(['left_shoulder'], ['left_elbow']),
(['right_shoulder'], ['right_elbow']),
(['left_elbow'], ['left_wrist']),
(['right_elbow'], ['right_wrist'])],
head_keypoint_name=['head'],
neck_keypoint_name=['neck'],
left_shoulder_keypoint_name=['left_shoulder'],
right_shoulder_keypoint_name=['right_shoulder'],
left_elbow_keypoint_name=['left_elbow'],
right_elbow_keypoint_name=['right_elbow'],
left_wrist_keypoint_name=['left_wrist'],
right_wrist_keypoint_name=['right_wrist'],
spine_keypoint_name=['spine'],
pelvis_keypoint_name=['pelvis'],
left_hip_keypoint_name=['left_hip'],
right_hip_keypoint_name=['right_hip'],
left_knee_keypoint_name=['left_knee'],
right_knee_keypoint_name=['right_knee'],
left_ankle_keypoint_name=['left_ankle'],
right_ankle_keypoint_name=['right_ankle'])
class Std13KeypointProfile2D(KeypointProfile2D):
"""Standard 2D 13-keypoint profile."""
def __init__(self):
"""Initializer."""
super(Std13KeypointProfile2D, self).__init__(
name='2DSTD13',
keypoint_names=[('NOSE_TIP', LeftRightType.CENTRAL),
('LEFT_SHOULDER', LeftRightType.LEFT),
('RIGHT_SHOULDER', LeftRightType.RIGHT),
('LEFT_ELBOW', LeftRightType.LEFT),
('RIGHT_ELBOW', LeftRightType.RIGHT),
('LEFT_WRIST', LeftRightType.LEFT),
('RIGHT_WRIST', LeftRightType.RIGHT),
('LEFT_HIP', LeftRightType.LEFT),
('RIGHT_HIP', LeftRightType.RIGHT),
('LEFT_KNEE', LeftRightType.LEFT),
('RIGHT_KNEE', LeftRightType.RIGHT),
('LEFT_ANKLE', LeftRightType.LEFT),
('RIGHT_ANKLE', LeftRightType.RIGHT)],
offset_keypoint_names=['LEFT_HIP', 'RIGHT_HIP'],
scale_keypoint_name_pairs=[(['LEFT_SHOULDER'], ['RIGHT_SHOULDER']),
(['LEFT_SHOULDER'], ['LEFT_HIP']),
(['LEFT_SHOULDER'], ['RIGHT_HIP']),
(['RIGHT_SHOULDER'], ['LEFT_HIP']),
(['RIGHT_SHOULDER'], ['RIGHT_HIP']),
(['LEFT_HIP'], ['RIGHT_HIP'])],
segment_name_pairs=[(['NOSE_TIP'], ['LEFT_SHOULDER']),
(['NOSE_TIP'], ['RIGHT_SHOULDER']),
(['LEFT_SHOULDER'], ['RIGHT_SHOULDER']),
(['LEFT_SHOULDER'], ['LEFT_ELBOW']),
(['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
(['LEFT_ELBOW'], ['LEFT_WRIST']),
(['RIGHT_ELBOW'], ['RIGHT_WRIST']),
(['LEFT_SHOULDER'], ['LEFT_HIP']),
(['RIGHT_SHOULDER'], ['RIGHT_HIP']),
(['LEFT_HIP'], ['RIGHT_HIP']),
(['LEFT_HIP'], ['LEFT_KNEE']),
(['RIGHT_HIP'], ['RIGHT_KNEE']),
(['LEFT_KNEE'], ['LEFT_ANKLE']),
(['RIGHT_KNEE'], ['RIGHT_ANKLE'])],
compatible_keypoint_name_dict={
'3DSTD16': [
'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
'RIGHT_ANKLE'
],
'3DSTD13': [
'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
'RIGHT_ANKLE'
],
'LEGACY_3DH36M17': [
'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow', 'LWrist',
'RWrist', 'LHip', 'RHip', 'LKnee', 'RKnee', 'LFoot', 'RFoot'
],
'LEGACY_3DMPII3DHP17': [
'head', 'left_shoulder', 'right_shoulder', 'left_elbow',
'right_elbow', 'left_wrist', 'right_wrist', 'left_hip',
'right_hip', 'left_knee', 'right_knee', 'left_ankle',
'right_ankle'
],
},
head_keypoint_name=['NOSE_TIP'],
neck_keypoint_name=['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
left_shoulder_keypoint_name=['LEFT_SHOULDER'],
right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
left_elbow_keypoint_name=['LEFT_ELBOW'],
right_elbow_keypoint_name=['RIGHT_ELBOW'],
left_wrist_keypoint_name=['LEFT_WRIST'],
right_wrist_keypoint_name=['RIGHT_WRIST'],
spine_keypoint_name=[
'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP'
],
pelvis_keypoint_name=['LEFT_HIP', 'RIGHT_HIP'],
left_hip_keypoint_name=['LEFT_HIP'],
right_hip_keypoint_name=['RIGHT_HIP'],
left_knee_keypoint_name=['LEFT_KNEE'],
right_knee_keypoint_name=['RIGHT_KNEE'],
left_ankle_keypoint_name=['LEFT_ANKLE'],
right_ankle_keypoint_name=['RIGHT_ANKLE'])
class LegacyCoco13KeypointProfile2D(Std13KeypointProfile2D):
"""Legacy COCO 2D 13-keypoint profile.
  This profile is the same as the `2DSTD13` profile, except for the name.
"""
def __init__(self):
"""Initializer."""
super(LegacyCoco13KeypointProfile2D, self).__init__()
self._name = 'LEGACY_2DCOCO13'
class LegacyH36m13KeypointProfile2D(KeypointProfile2D):
"""Legacy Human3.6M 2D 13-keypoint profile."""
def __init__(self):
"""Initializer."""
super(LegacyH36m13KeypointProfile2D,
self).__init__(
name='LEGACY_2DH36M13',
keypoint_names=[('Head', LeftRightType.CENTRAL),
('LShoulder', LeftRightType.LEFT),
('RShoulder', LeftRightType.RIGHT),
('LElbow', LeftRightType.LEFT),
('RElbow', LeftRightType.RIGHT),
('LWrist', LeftRightType.LEFT),
('RWrist', LeftRightType.RIGHT),
('LHip', LeftRightType.LEFT),
('RHip', LeftRightType.RIGHT),
('LKnee', LeftRightType.LEFT),
('RKnee', LeftRightType.RIGHT),
('LFoot', LeftRightType.LEFT),
('RFoot', LeftRightType.RIGHT)],
offset_keypoint_names=['LHip', 'RHip'],
scale_keypoint_name_pairs=[(['LShoulder'], ['RShoulder']),
(['LShoulder'], ['LHip']),
(['LShoulder'], ['RHip']),
(['RShoulder'], ['LHip']),
(['RShoulder'], ['RHip']),
(['LHip'], ['RHip'])],
segment_name_pairs=[(['Head'], ['LShoulder']),
(['Head'], ['RShoulder']),
(['LShoulder'], ['LElbow']),
(['LElbow'], ['LWrist']),
(['RShoulder'], ['RElbow']),
(['RElbow'], ['RWrist']),
(['LShoulder'], ['LHip']),
(['RShoulder'], ['RHip']),
(['LHip'], ['LKnee']), (['LKnee'], ['LFoot']),
(['RHip'], ['RKnee']), (['RKnee'], ['RFoot']),
(['LShoulder'], ['RShoulder']),
(['LHip'], ['RHip'])],
compatible_keypoint_name_dict={
'3DSTD16': [
'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
'RIGHT_ANKLE'
],
'3DSTD13': [
'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
'RIGHT_ANKLE'
],
'LEGACY_3DH36M17': [
'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow',
'LWrist', 'RWrist', 'LHip', 'RHip', 'LKnee', 'RKnee',
'LFoot', 'RFoot'
],
'LEGACY_3DMPII3DHP17': [
'head', 'left_shoulder', 'right_shoulder', 'left_elbow',
'right_elbow', 'left_wrist', 'right_wrist', 'left_hip',
'right_hip', 'left_knee', 'right_knee', 'left_ankle',
'right_ankle'
],
},
head_keypoint_name=['Head'],
neck_keypoint_name=['LShoulder', 'RShoulder'],
left_shoulder_keypoint_name=['LShoulder'],
right_shoulder_keypoint_name=['RShoulder'],
left_elbow_keypoint_name=['LElbow'],
right_elbow_keypoint_name=['RElbow'],
left_wrist_keypoint_name=['LWrist'],
right_wrist_keypoint_name=['RWrist'],
spine_keypoint_name=['LShoulder', 'RShoulder', 'LHip', 'RHip'],
pelvis_keypoint_name=['LHip', 'RHip'],
left_hip_keypoint_name=['LHip'],
right_hip_keypoint_name=['RHip'],
left_knee_keypoint_name=['LKnee'],
right_knee_keypoint_name=['RKnee'],
left_ankle_keypoint_name=['LFoot'],
right_ankle_keypoint_name=['RFoot'])
def create_keypoint_profile_or_die(keypoint_profile_name):
"""Creates keypoint profile based on name.
Args:
keypoint_profile_name: A string for keypoint profile name.
Returns:
    A keypoint profile class object.
Raises:
ValueError: If keypoint profile name is unsupported.
"""
if keypoint_profile_name == '3DSTD16':
return Std16KeypointProfile3D()
if keypoint_profile_name == '3DSTD13':
return Std13KeypointProfile3D()
if keypoint_profile_name == 'LEGACY_3DH36M17':
return LegacyH36m17KeypointProfile3D()
if keypoint_profile_name == 'LEGACY_3DH36M13':
return LegacyH36m13KeypointProfile3D()
if keypoint_profile_name == 'LEGACY_3DMPII3DHP17':
return LegacyMpii3dhp17KeypointProfile3D()
if keypoint_profile_name == '2DSTD13':
return Std13KeypointProfile2D()
if keypoint_profile_name == 'LEGACY_2DCOCO13':
return LegacyCoco13KeypointProfile2D()
if keypoint_profile_name == 'LEGACY_2DH36M13':
return LegacyH36m13KeypointProfile2D()
raise ValueError('Unsupported keypoint profile name: `%s`.' %
str(keypoint_profile_name))
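# Editor's note: a brief, hedged usage sketch (not part of the original module).
# The factory above maps the registered profile names to profile objects; the calls
# are kept commented out so nothing runs at import time.
#
#   profile_3d = create_keypoint_profile_or_die('3DSTD13')   # -> Std13KeypointProfile3D
#   profile_2d = create_keypoint_profile_or_die('2DSTD13')   # -> Std13KeypointProfile2D
#   create_keypoint_profile_or_die('UNKNOWN')                 # raises ValueError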
| apache-2.0 | 7,232,078,692,394,039,000 | 41.027586 | 80 | 0.556059 | false |
stilobique/UE4-Tools | controllers/data_buffer.py | 1 | 3448 | import bpy
import pyperclip
from math import degrees
class DataBuffer(bpy.types.Operator):
"""Export data Position, Rotation and Scale of all selected element"""
bl_idname = "object.data_buffer"
bl_label = "Paste information buffer"
def execute(self, context):
objs = context.selected_objects
string_data_prefixe = 'Begin Map \n'' Begin Level \n'
string_data_suffixe = ' End Level\n''Begin Surface\n''End ' \
'Surface\n''End Map'
string_data = ""
for element in objs:
if element is not None:
position_x = str(round(element.location.x * 100, 2))
position_y = str(round(element.location.y * -100, 2))
position_z = str(round(element.location.z * 100, 2))
rotation_pitch = str(round(degrees(element.rotation_euler.y), 2))
rotation_yaw = str(round(degrees(element.rotation_euler.z),
2)* -1)
rotation_roll = str(round(degrees(element.rotation_euler.x), 2))
string_data = string_data + \
' Begin Actor '\
'Class=StaticMeshActor '\
'Name=' + element.name + ' ' \
'Archetype=StaticMeshActor'\
'\'/Script/Engine.Default__StaticMeshActor\'\n'\
' Begin Object Class=StaticMeshComponent '\
'Name=StaticMeshComponent0 '\
'ObjName=StaticMeshComponent0 ' \
'Archetype=StaticMeshComponent'\
'\'/Script/Engine.Default__StaticMeshActor:StaticMeshComponent0' \
'\'\n'\
' End Object\n'\
' Begin Object '\
'Name=StaticMeshComponent0\n'\
' StaticMesh=StaticMesh\'/Engine/EditorMeshes/EditorCube' \
'.EditorCube\' \n'\
' RelativeLocation=(X=' + position_x + ',Y=' + \
position_y + ',Z=' + position_z + ')\n'\
' RelativeScale3D=(X=' + str(round(element.scale.x, 2)) + ',' \
'Y=' + str(round(element.scale.y, 2)) + ',' \
'Z=' + \
str(round(element.scale.z, 2)) + ')\n'\
' RelativeRotation=(Pitch=' + rotation_pitch + ',Yaw=' + \
rotation_yaw + ',' \
'Roll=' + \
rotation_roll + ')\n'\
' CustomProperties\n' \
' End Object\n' \
' StaticMeshComponent=StaticMeshComponent0\n' \
' Components(0)=StaticMeshComponent0\n' \
' RootComponent=StaticMeshComponent0\n' \
' ActorLabel="' + element.name + '"\n' \
                      ' End Actor\n'
else:
self.report({'WARNING'}, "Select an object(s).")
return {'CANCELLED'}
string_complete = string_data_prefixe + string_data \
+ string_data_suffixe
# copyBuffer(objs[0].name)
pyperclip.copy(string_complete)
print(string_complete)
self.report({'INFO'}, "Data copied on your Buffer.")
return {'FINISHED'}
def register():
bpy.utils.register_class(DataBuffer)
def unregister():
bpy.utils.unregister_class(DataBuffer)
if __name__ == "__main__":
register() | gpl-3.0 | -6,769,097,919,469,354,000 | 37.322222 | 114 | 0.49739 | false |
posquit0/dotfiles | vim/.vim/ycm_extra_conf.py | 1 | 6657 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
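# Editor's note: a small, hedged self-test (not part of the original file). YCM calls
# FlagsForFile() with the path of the file being edited; the path below is only a
# placeholder, and running this directly requires the ycm_core module to be importable.
if __name__ == '__main__':
  import pprint
  pprint.pprint( FlagsForFile( '/tmp/example.cpp' ) )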
| mit | -7,185,472,345,298,154,000 | 32.964286 | 107 | 0.705122 | false |
prefetchnta/questlab | bin/x64bin/python/37/Lib/ctypes/util.py | 1 | 13437 | import os
import shutil
import subprocess
import sys
# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":
def _get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
# This function was copied from Lib/distutils/msvccompiler.py
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
if majorVersion >= 13:
majorVersion += 1
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def find_msvcrt():
"""Return the name of the VC runtime dll"""
version = _get_build_version()
if version is None:
# better be safe than sorry
return None
if version <= 6:
clibname = 'msvcrt'
elif version <= 13:
clibname = 'msvcr%d' % (version * 10)
else:
# CRT is no longer directly loadable. See issue23606 for the
# discussion about alternative approaches.
return None
# If python was built with in debug mode
import importlib.machinery
if '_d.pyd' in importlib.machinery.EXTENSION_SUFFIXES:
clibname += 'd'
return clibname+'.dll'
def find_library(name):
if name in ('c', 'm'):
return find_msvcrt()
# See MSDN for the REAL search order.
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
return fname
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
return fname
return None
elif os.name == "posix" and sys.platform == "darwin":
from ctypes.macholib.dyld import dyld_find as _dyld_find
def find_library(name):
possible = ['lib%s.dylib' % name,
'%s.dylib' % name,
'%s.framework/%s' % (name, name)]
for name in possible:
try:
return _dyld_find(name)
except ValueError:
continue
return None
elif sys.platform.startswith("aix"):
# AIX has two styles of storing shared libraries
# GNU auto_tools refer to these as svr4 and aix
# svr4 (System V Release 4) is a regular file, often with .so as suffix
# AIX style uses an archive (suffix .a) with members (e.g., shr.o, libssl.so)
# see issue#26439 and _aix.py for more details
from ctypes._aix import find_library
elif os.name == "posix":
# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
import re, tempfile
def _findLib_gcc(name):
# Run GCC's linker with the -t (aka --trace) option and examine the
# library name it prints out. The GCC command will fail because we
# haven't supplied a proper program with main(), but that does not
# matter.
expr = os.fsencode(r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name))
c_compiler = shutil.which('gcc')
if not c_compiler:
c_compiler = shutil.which('cc')
if not c_compiler:
# No C compiler available, give up
return None
temp = tempfile.NamedTemporaryFile()
try:
args = [c_compiler, '-Wl,-t', '-o', temp.name, '-l' + name]
env = dict(os.environ)
env['LC_ALL'] = 'C'
env['LANG'] = 'C'
try:
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
except OSError: # E.g. bad executable
return None
with proc:
trace = proc.stdout.read()
finally:
try:
temp.close()
except FileNotFoundError:
# Raised if the file was already removed, which is the normal
# behaviour of GCC if linking fails
pass
res = re.search(expr, trace)
if not res:
return None
return os.fsdecode(res.group(0))
if sys.platform == "sunos5":
# use /usr/ccs/bin/dump on solaris
def _get_soname(f):
if not f:
return None
try:
proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f),
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
except OSError: # E.g. command not found
return None
with proc:
data = proc.stdout.read()
res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data)
if not res:
return None
return os.fsdecode(res.group(1))
else:
def _get_soname(f):
# assuming GNU binutils / ELF
if not f:
return None
objdump = shutil.which('objdump')
if not objdump:
# objdump is not available, give up
return None
try:
proc = subprocess.Popen((objdump, '-p', '-j', '.dynamic', f),
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
except OSError: # E.g. bad executable
return None
with proc:
dump = proc.stdout.read()
res = re.search(br'\sSONAME\s+([^\s]+)', dump)
if not res:
return None
return os.fsdecode(res.group(1))
if sys.platform.startswith(("freebsd", "openbsd", "dragonfly")):
def _num_version(libname):
# "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
parts = libname.split(b".")
nums = []
try:
while parts:
nums.insert(0, int(parts.pop()))
except ValueError:
pass
return nums or [sys.maxsize]
def find_library(name):
ename = re.escape(name)
expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
expr = os.fsencode(expr)
try:
proc = subprocess.Popen(('/sbin/ldconfig', '-r'),
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
except OSError: # E.g. command not found
data = b''
else:
with proc:
data = proc.stdout.read()
res = re.findall(expr, data)
if not res:
return _get_soname(_findLib_gcc(name))
res.sort(key=_num_version)
return os.fsdecode(res[-1])
elif sys.platform == "sunos5":
def _findLib_crle(name, is64):
if not os.path.exists('/usr/bin/crle'):
return None
env = dict(os.environ)
env['LC_ALL'] = 'C'
if is64:
args = ('/usr/bin/crle', '-64')
else:
args = ('/usr/bin/crle',)
paths = None
try:
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
except OSError: # E.g. bad executable
return None
with proc:
for line in proc.stdout:
line = line.strip()
if line.startswith(b'Default Library Path (ELF):'):
paths = os.fsdecode(line).split()[4]
if not paths:
return None
for dir in paths.split(":"):
libfile = os.path.join(dir, "lib%s.so" % name)
if os.path.exists(libfile):
return libfile
return None
def find_library(name, is64 = False):
return _get_soname(_findLib_crle(name, is64) or _findLib_gcc(name))
else:
def _findSoname_ldconfig(name):
import struct
if struct.calcsize('l') == 4:
machine = os.uname().machine + '-32'
else:
machine = os.uname().machine + '-64'
mach_map = {
'x86_64-64': 'libc6,x86-64',
'ppc64-64': 'libc6,64bit',
'sparc64-64': 'libc6,64bit',
's390x-64': 'libc6,64bit',
'ia64-64': 'libc6,IA-64',
}
abi_type = mach_map.get(machine, 'libc6')
# XXX assuming GLIBC's ldconfig (with option -p)
regex = r'\s+(lib%s\.[^\s]+)\s+\(%s'
regex = os.fsencode(regex % (re.escape(name), abi_type))
try:
with subprocess.Popen(['/sbin/ldconfig', '-p'],
stdin=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
stdout=subprocess.PIPE,
env={'LC_ALL': 'C', 'LANG': 'C'}) as p:
res = re.search(regex, p.stdout.read())
if res:
return os.fsdecode(res.group(1))
except OSError:
pass
def _findLib_ld(name):
# See issue #9998 for why this is needed
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
cmd = ['ld', '-t']
libpath = os.environ.get('LD_LIBRARY_PATH')
if libpath:
for d in libpath.split(':'):
cmd.extend(['-L', d])
cmd.extend(['-o', os.devnull, '-l%s' % name])
result = None
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, _ = p.communicate()
res = re.search(expr, os.fsdecode(out))
if res:
result = res.group(0)
except Exception as e:
pass # result will be None
return result
def find_library(name):
# See issue #9998
return _findSoname_ldconfig(name) or \
_get_soname(_findLib_gcc(name) or _findLib_ld(name))
################################################################
# test code
def test():
from ctypes import cdll
if os.name == "nt":
print(cdll.msvcrt)
print(cdll.load("msvcrt"))
print(find_library("msvcrt"))
if os.name == "posix":
# find and load_version
print(find_library("m"))
print(find_library("c"))
print(find_library("bz2"))
# load
if sys.platform == "darwin":
print(cdll.LoadLibrary("libm.dylib"))
print(cdll.LoadLibrary("libcrypto.dylib"))
print(cdll.LoadLibrary("libSystem.dylib"))
print(cdll.LoadLibrary("System.framework/System"))
# issue-26439 - fix broken test call for AIX
elif sys.platform.startswith("aix"):
from ctypes import CDLL
if sys.maxsize < 2**32:
print(f"Using CDLL(name, os.RTLD_MEMBER): {CDLL('libc.a(shr.o)', os.RTLD_MEMBER)}")
print(f"Using cdll.LoadLibrary(): {cdll.LoadLibrary('libc.a(shr.o)')}")
# librpm.so is only available as 32-bit shared library
print(find_library("rpm"))
print(cdll.LoadLibrary("librpm.so"))
else:
print(f"Using CDLL(name, os.RTLD_MEMBER): {CDLL('libc.a(shr_64.o)', os.RTLD_MEMBER)}")
print(f"Using cdll.LoadLibrary(): {cdll.LoadLibrary('libc.a(shr_64.o)')}")
print(f"crypt\t:: {find_library('crypt')}")
print(f"crypt\t:: {cdll.LoadLibrary(find_library('crypt'))}")
print(f"crypto\t:: {find_library('crypto')}")
print(f"crypto\t:: {cdll.LoadLibrary(find_library('crypto'))}")
else:
print(cdll.LoadLibrary("libm.so"))
print(cdll.LoadLibrary("libcrypt.so"))
print(find_library("crypt"))
if __name__ == "__main__":
test()
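# Editor's note: a brief, hedged usage illustration (not part of the original file).
# find_library() only resolves a library name to a file name; loading is a separate step:
#
#   from ctypes import CDLL
#   from ctypes.util import find_library
#   name = find_library("m")          # e.g. "libm.so.6" on glibc systems, or None
#   libm = CDLL(name) if name else None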
| lgpl-2.1 | 6,956,310,689,996,022,000 | 35.53352 | 102 | 0.467143 | false |
AceSrc/datagon | datagon/generator/translator.py | 1 | 3802 | import parser
import random
result = ''
symbol = {}
cnt = 0
def Translator(ast):
def PrintError(x):
print(x)
exit(1)
def PrintMsg(x):
print(x)
def Output(x):
global result
result += str(x) + ' '
def GetRandomInt(interval):
if isinstance(interval, str):
PrintError('Error: ' + interval)
if isinstance(interval, int):
return interval
if interval[0] > interval[1]:
            print('!!! Invalid Interval ')
exit(1)
rt = random.randint(interval[0], interval[1])
return rt
def AddPermutation(interval):
n = GetRandomInt(interval)
p = [i for i in range(1, n + 1)]
random.shuffle(p)
global result
for i in p:
result += str(i) + ' '
return None
def Add(a, b):
return GetRandomInt(a) + GetRandomInt(b)
    def Mul(a, b):
        return GetRandomInt(a) * GetRandomInt(b)
    def Sub(a, b):
        return GetRandomInt(a) - GetRandomInt(b)
def AddWeight(n, interval):
n = GetRandomInt(n)
for i in range(0, n):
Output(GetRandomInt(interval))
def RepeatOutput(node):
times = TranslateNode(node.params[0], node)
for i in range(0, times):
TranslateArray(node)
AddNewLine()
def HandleFunction(node):
print('handling function: ' + node.type)
print(node.params)
cases = {
'print': lambda x: Output(GetRandomInt(TranslateNode(node.params[0], x))),
'add': lambda x: Add(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
'sub': lambda x: Sub(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
'mul': lambda x: Mul(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
'permutation': lambda x: AddPermutation(TranslateNode(x.params[0], x)),
'weight': lambda x: AddWeight(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
'repeat': lambda x: RepeatOutput(x),
'set': lambda x: SetVariableValue(x.params[0].name, TranslateNode(x.params[1], x))
}
return cases.get(node.type, lambda x: None)(node)
def AddNewLine():
global cnt
cnt += 1
if cnt <= 0:
return
cnt -= 1
global result
result += '\n'
def CleanLine():
print("Clean")
global cnt
cnt -= 1
def HandleFormat(node):
print("Handling Format: " + node.value)
cases = {
'newline': lambda x: AddNewLine(),
'clearline': lambda x: CleanLine(),
}
return cases.get(node.value, lambda x: None)(node)
def GetVariableValue(name):
return symbol.get(name, name)
def SetVariableValue(name, value):
value = GetRandomInt(value)
symbol[name] = value
print('Set variable: ' + str(name) + ' = ' + str(symbol[name]))
return symbol[name]
def TranslateArray(node):
for x in node.params:
TranslateNode(x, node)
def TranslateNode(node, parent):
cases = {
parser.Function: lambda x: HandleFunction(x),
parser.Number: lambda x: x.value,
parser.Interval:
lambda x: [TranslateNode(x.left, x) + x.leftoffset, TranslateNode(x.right, x) + x.rightoffset],
parser.String: lambda x: GetVariableValue(x.name),
parser.Setvar: lambda x: SetVariableValue(x),
parser.Program: lambda x: TranslateArray(x),
parser.Format: lambda x: HandleFormat(x),
}
return cases.get(node.__class__, lambda x: None)(node)
TranslateArray(ast)
return result
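# Editor's note: a hedged usage sketch (not part of the original module). Translator()
# walks an AST built by the accompanying parser module; the exact parser entry point
# below is an assumption, not a documented API.
#
#   import parser
#   ast = parser.parse(open('spec.dgn').read())   # hypothetical entry point and file
#   print(Translator(ast))                        # prints the generated test data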
| mit | 9,119,672,105,469,806,000 | 29.66129 | 111 | 0.553919 | false |
sesh/djver | djver/djver.py | 1 | 6137 | #!/usr/bin/env python
"""
djver.
Usage:
djver.py [<url>] [--static-path=<static-path>] [--find-diffs] [--verbose]
Options:
--static-path=<static-path> URL path to the site's static files [default: /static/].
--find-diffs Attempt to find differences between the known versions of Django
--verbose Turn on verbose logging
"""
import os
import sys
import subprocess
import shutil
import difflib
import requests
from docopt import docopt
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
try:
from packaging.version import parse
except:
def parse(version):
return None
RESPONSE_CACHE = {}
THIRD_PARTY_CSS = [
# Third party apps, might disguise version numbers
('django-flat-theme, or Django 1.9', 'fonts.css', 'Roboto'),
('django-suit', 'forms.css', 'Django Suit'),
]
ADMIN_CHANGES = [
('2.1.2-2.1', 'css/base.css', 'background: url(../img/icon-viewlink.svg) 0 1px no-repeat;'),
('2.0.9-2.0', 'css/base.css', 'textarea:focus, select:focus, .vTextField:focus {'),
('1.11.16-1.11', 'css/base.css', 'background-position: right 7px center;'),
('1.10.8-1.10', 'css/base.css', 'color: #000;'),
('1.9.13-1.9', 'css/widgets.css', 'margin-left: 7px;'),
('1.8.19-1.8.2', 'css/forms.css', 'clear: left;'),
('1.8.1', 'css/widgets.css', '.related-widget-wrapper {'),
('1.8', 'css/widgets.css', 'opacity: 1;'),
('1.7.11-1.7', 'css/base.css', '#branding a:hover {'),
('1.6.11-1.6', 'css/widgets.css', 'width: 360px;'),
('1.5.12-1.5', 'css/widgets.css', '.url a {'),
('1.4.22-1.4.1', 'css/widgets.css', '.inline-group .aligned .selector .selector-filter label {'),
]
def check_str(url, search_str, verbose=False):
if url in RESPONSE_CACHE.keys():
content = RESPONSE_CACHE[url]
status_code = 200
else:
response = requests.get(url)
content = response.content.decode().replace(' ', '')
status_code = response.status_code
if verbose:
print('[{}] {}'.format(status_code, url))
if status_code == 200:
RESPONSE_CACHE[url] = content
return search_str.replace(' ', '') in content
def check_version(base_url, static_path, verbose=False):
if not base_url.startswith('http'):
base_url = 'http://{}'.format(base_url)
for version, path, string in ADMIN_CHANGES:
url = '{}{}admin/{}'.format(base_url, static_path, path)
if check_str(url, string, verbose):
return version
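# Editor's note: a hedged illustration (not part of the original script). The return
# value is a version-range string taken from ADMIN_CHANGES, or None when no admin-CSS
# fingerprint matches; the domain below is only a placeholder.
#
#   check_version('www.example.com', '/static/')   # -> e.g. '1.11.16-1.11' or None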
def find_diffs():
response = requests.get('https://pypi.org/pypi/Django/json')
versions = [parse(v) for v in response.json()['releases'].keys()]
versions = sorted(versions, reverse=True)
print(versions)
versions = [str(v) for v in versions if v.is_prerelease == False and v > parse("1.3.99")]
# we only care about 1.4 and above
# favour files _not_ found in django-flat-theme
files = [
# admin js
# "js/SelectBox.js",
# "js/actions.js",
# "js/actions.min.js",
# "js/calendar.js",
# "js/collapse.js",
# "js/collapse.min.js",
# "js/core.js",
# "js/inlines.js",
# "js/inlines.min.js",
# "js/jquery.init.js",
# "js/prepopulate.js",
# "js/prepopulate.min.js",
# "js/timeparse.js",
# "js/urlify.js",
# admin css
'css/widgets.css', 'css/base.css', 'css/forms.css', 'css/login.css', 'css/dashboard.css',
# 'css/ie.css', # removed in 1.9.x
]
for v in versions:
os.makedirs('files/{}/css/'.format(v), exist_ok=True)
os.makedirs('files/{}/js/'.format(v), exist_ok=True)
for fn in files:
full_path = 'files/{}/{}'.format(v, fn)
if not os.path.exists(full_path):
repo = 'https://raw.githubusercontent.com/django/django/'
url = '{}{}/django/contrib/admin/static/admin/{}'.format(repo, v, fn)
if v.startswith('1.3'):
url = '{}{}/django/contrib/admin/media/{}'.format(repo, v, fn)
response = requests.get(url)
print('[{}] {}'.format(response.status_code, url))
with open(full_path, 'wb') as f:
f.write(response.content)
matched_versions = []
for i, v1 in enumerate(versions[:-1]):
matched_versions.append(v1)
v2 = versions[i + 1]
new_line = None
for f in files:
f1 = open('files/{}/{}'.format(v1, f)).read()
f2 = open('files/{}/{}'.format(v2, f)).read()
# compare f2 to f1 so that we see _added_ lines
diff = difflib.ndiff(f2.splitlines(), f1.splitlines())
for line in diff:
if line.startswith('+ ') and '/*' not in line:
line = line[2:]
# ensure this line is unique within the file
if f1.count(line) == 1:
# we also want to make sure that it doesn't appear in any _older_ versions
for v in versions[i + 1:]:
f3 = open('files/{}/{}'.format(v, f)).read()
if line in f3:
break
new_line = line
if new_line:
if len(matched_versions) > 1:
print("('{}', '{}', '{}'),".format('-'.join([matched_versions[0], matched_versions[-1]]), f, new_line.strip()))
else:
print("('{}', '{}', '{}'),".format(matched_versions[0], f, new_line.strip()))
matched_versions = []
break
def djver():
arguments = docopt(__doc__, version='djver 2.0.0')
if arguments['--find-diffs']:
find_diffs()
elif arguments['<url>']:
version = check_version(arguments['<url>'], arguments['--static-path'], arguments['--verbose'])
if version:
print(version)
else:
print('Unable to detect version.')
if __name__ == '__main__':
djver()
| mit | 6,160,630,217,541,258,000 | 31.994624 | 131 | 0.536093 | false |
marrow/mongo | test/trait/test_collection.py | 1 | 3008 | # encoding: utf-8
from __future__ import unicode_literals
import pytest
from pymongo.errors import WriteError
from marrow.mongo import Field, Index
from marrow.mongo.trait import Collection
@pytest.fixture
def db(request, connection):
return connection.test
@pytest.fixture
def coll(request, db):
return db.collection
@pytest.fixture
def Sample(request):
class Sample(Collection):
__collection__ = 'collection'
__engine__ = {'mmapv1': {}}
id = None # Remove the default identifier.
field = Field()
other = Field()
_field = Index('field', background=False)
return Sample
class TestDocumentBinding(object):
def test_bind_fail(self, Sample):
with pytest.raises(TypeError):
Sample.bind()
def test_bind_specific_collection(self, coll, Sample):
assert not Sample.__bound__
Sample.bind(coll)
assert Sample.__bound__
def test_bind_specific_collection_twice(self, coll, Sample):
assert not Sample.__bound__
Sample.bind(coll)
assert Sample.__bound__
first = Sample.__bound__
Sample.bind(coll)
assert Sample.__bound__ is first
assert Sample.get_collection() is first
def test_bind_database(self, db, Sample):
assert not Sample.__bound__
Sample.bind(db)
assert Sample.__bound__
def test_create_collection(self, db, Sample):
assert Sample.create_collection(db, drop=True, indexes=False).name == 'collection'
def test_create_bound_collection(self, db, Sample):
assert Sample.bind(db).create_collection(drop=True, indexes=False).name == 'collection'
def test_create_collection_failure(self, Sample):
with pytest.raises(TypeError):
Sample.create_collection()
with pytest.raises(TypeError):
Sample.create_collection("Hoi.")
def test_create_collection_collection(self, db, Sample):
assert Sample.create_collection(db.foo, True).name == 'foo'
def test_get_collection_failure(self, Sample):
with pytest.raises(TypeError):
Sample.get_collection(None)
with pytest.raises(TypeError):
Sample.get_collection("Hoi.")
def test_validation(self, db, Sample):
if tuple((int(i) for i in db.client.server_info()['version'].split('.')[:3])) < (3, 2):
pytest.xfail("Test expected to fail on MongoDB versions prior to 3.2.")
Sample.__validate__ = 'strict'
Sample.__validator__ = {'field': {'$gt': 27}}
c = Sample.create_collection(db, True, indexes=False)
c.insert_one(Sample(42))
with pytest.raises(WriteError):
c.insert_one(Sample(12))
def test_index_construction(self, db, Sample):
c = Sample.create_collection(db, True, indexes=False)
Sample.create_indexes(c, True)
indexes = c.index_information()
assert '_field' in indexes
del indexes['_field']['v']
assert indexes['_field'] == {
'background': False,
'key': [('field', 1)],
'ns': 'test.collection',
'sparse': False
}
def test_update_empty(self, db, Sample):
Sample.bind(db).create_collection(drop=True)
with pytest.raises(TypeError):
Sample().update_one()
| mit | 8,838,251,156,425,727,000 | 23.258065 | 89 | 0.68617 | false |
ICTU/quality-time | components/server/src/initialization/report.py | 1 | 2301 | """Report loaders."""
import json
import logging
import pathlib
from pymongo.database import Database
from database.reports import insert_new_report, insert_new_reports_overview, latest_reports_overview, report_exists
def initialize_reports_overview(database: Database) -> None:
"""Initialize the reports overview if not present in the database."""
# The coverage measurement of the behave feature tests is unstable. Most of the time it reports the last two lines
# as covered, sometimes not. It's unclear why. To prevent needless checking of the coverage report coverage
# measurement of the last two lines and the if-statement has been turned off.
if latest_reports_overview(database): # pragma: no cover-behave
logging.info("Skipping initializing reports overview; it already exists")
else:
logging.info("Initializing reports overview") # pragma: no cover-behave
insert_new_reports_overview(
database, "{{user}} initialized the reports overview", dict(title="Reports", subtitle="")
) # pragma: no cover-behave
def import_report(database: Database, filename: pathlib.Path) -> None:
"""Read the report and store it in the database."""
# The coverage measurement of the behave feature tests is unstable. Most of the time it reports the last two lines
# as covered, sometimes not. It's unclear why. To prevent needless checking of the coverage report coverage
# measurement of the last two lines and the if-statement has been turned off.
with filename.open() as json_report:
imported_report = json.load(json_report)
if report_exists(database, imported_report["report_uuid"]): # pragma: no cover-behave
logging.info("Skipping import of %s; it already exists", filename)
else: # pragma: no cover-behave
insert_new_report(database, "{{user}} imported a new report", (imported_report, imported_report["report_uuid"]))
logging.info("Report %s imported", filename)
def import_example_reports(database: Database) -> None:
"""Import the example reports."""
example_reports_path = pathlib.Path(__file__).resolve().parent.parent / "example-reports"
for filename in example_reports_path.glob("example-report*.json"):
import_report(database, filename)
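# Editor's note: a hedged sketch of how these loaders are typically wired together at
# server start-up (not part of the original module); `database` is a pymongo Database.
#
#   initialize_reports_overview(database)
#   import_example_reports(database)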
| apache-2.0 | 7,523,116,976,913,807,000 | 51.295455 | 120 | 0.719687 | false |
pythonbyexample/PBE | dbe/classviews/edit_custom.py | 1 | 5732 | from django.forms import formsets
from django.contrib import messages
from django.db.models import Q
from detail import *
from edit import *
from dbe.shared.utils import *
class SearchFormViewMixin(BaseFormView):
ignore_get_keys = ["page"]
def get_form_kwargs(self):
""" Returns the keyword arguments for instanciating the form. """
r = self.request
kwargs = dict(initial=self.get_initial())
if r.method in ("POST", "PUT"):
kwargs.update(dict(data=r.POST, files=r.FILES))
elif r.GET:
# do get form processing if there's get data that's not in ignore list
if [k for k in r.GET.keys() if k not in self.ignore_get_keys]:
kwargs.update(dict(data=r.GET))
return kwargs
def get(self, request):
form = self.get_form()
if self.request.GET:
if form.is_valid():
self.process_form(form)
else:
return self.form_invalid(form)
return self.render_to_response(self.get_context_data(form=form))
class SearchFormView(FormView, SearchFormViewMixin):
"""FormView for search pages."""
class UpdateView2(UpdateView):
def get_success_url(self):
return self.object.get_absolute_url()
def get_context_data(self, **kwargs):
c = super(UpdateView2, self).get_context_data(**kwargs)
c.update(self.add_context())
return c
def add_context(self):
return {}
class UserUpdateView(UpdateView2):
def get_form_kwargs(self):
d = super(UpdateView2, self).get_form_kwargs()
d.update(dict(user=self.request.user))
return d
class CreateView2(CreateView):
def get_context_data(self, **kwargs):
c = super(CreateView2, self).get_context_data(**kwargs)
if hasattr(self, "add_context"):
c.update(self.add_context())
return c
def get_form_kwargs(self):
d = super(CreateView2, self).get_form_kwargs()
d.update(dict(user=self.request.user))
return d
class OwnObjMixin(SingleObjectMixin):
"""Access object, checking that it belongs to current user."""
item_name = None # used in permissions error message
owner_field = "creator" # object's field to compare to current user to check permission
def perm_error(self):
return HttpResponse("You don't have permissions to access this %s." % self.item_name)
def validate(self, obj):
if getattr(obj, self.owner_field) == self.request.user:
return True
def get_object(self, queryset=None):
obj = super(OwnObjMixin, self).get_object(queryset)
if not self.validate(obj): return None
return obj
class DeleteOwnObjView(OwnObjMixin, DeleteView):
"""Delete object, checking that it belongs to current user."""
class UpdateOwnObjView(OwnObjMixin, UpdateView2):
"""Update object, checking that it belongs to current user."""
class SearchEditFormset(SearchFormView):
"""Search form filtering a formset of items to be updated."""
model = None
formset_class = None
form_class = None
def get_form_class(self):
if self.request.method == "GET": return self.form_class
else: return self.formset_class
def get_queryset(self, form=None):
return self.model.objects.filter(self.get_query(form))
def get_query(self, form):
"""This method should always be overridden, applying search from the `form`."""
return Q()
def form_valid(self, form):
formset = None
if self.request.method == "GET":
formset = self.formset_class(queryset=self.get_queryset(form))
else:
form.save()
messages.success(self.request, "%s(s) were updated successfully" % self.model.__name__.capitalize())
formset = form
form = self.form_class(self.request.GET)
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def form_invalid(self, form):
formset = form
form = self.form_class(self.request.GET)
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def get(self, request, *args, **kwargs):
form = self.get_form()
if form.is_bound:
if form.is_valid(): return self.form_valid(form)
else: return self.form_invalid(form)
return self.render_to_response(self.get_context_data(form=form))
class CreateWithFormset(FormView):
""" Create multiple objects using a formset.
Passes user as an arg to each form init function.
"""
model = None
form_class = None
extra = 5
def get_form(self, form_class=None):
Formset = formsets.formset_factory(self.form_class, extra=self.extra)
Formset.form = staticmethod(curry(self.form_class, user=self.request.user))
return Formset(**self.get_form_kwargs())
def post(self, request, *args, **kwargs):
self.object = None
formset = self.get_form()
if formset.is_valid():
return self.form_valid(formset)
else:
return self.form_invalid(formset)
def form_valid(self, formset):
for form in formset:
if form.has_changed():
form.save()
return HttpResponseRedirect(reverse(self.success_url_name))
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
context = super(CreateWithFormset, self).get_context_data(**kwargs)
return updated( context, dict(formset=self.get_form()) )
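# Editor's note: a hedged usage sketch (not part of the original module). A concrete
# view would subclass CreateWithFormset roughly like this; `Note` and `NoteForm` are
# placeholder names, not objects defined in this project.
#
#   class AddNotesView(CreateWithFormset):
#       model            = Note
#       form_class       = NoteForm
#       extra            = 3
#       success_url_name = "note_list"
#       template_name    = "add_notes.html"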
| bsd-3-clause | -6,290,611,191,496,170,000 | 32.325581 | 112 | 0.629449 | false |
CINPLA/expipe-dev | expipe-templates-cinpla/get_templates.py | 1 | 1153 | import expipe
import os.path as op
import os
import json
overwrite = True
base_dir = op.join(op.abspath(op.dirname(op.expanduser(__file__))), 'templates')
templates = expipe.core.FirebaseBackend("/templates").get()
for template, val in templates.items():
identifier = val.get('identifier')
if identifier is None:
continue
path = template.split('_')[0]
name = identifier.split('_')[1:]
if path == 'person':
continue
if len(name) == 0:
continue
raise ValueError('No grouping on template "' + template + '"')
fbase = '_'.join(name)
fname = op.join(base_dir, path, fbase + '.json')
result = expipe.get_template(template=template)
if op.exists(fname) and not overwrite:
raise FileExistsError('The filename "' + fname +
'" exists, set ovewrite to true.')
os.makedirs(op.dirname(fname), exist_ok=True)
print('Saving template "' + template + '" to "' + fname + '"')
with open(fname, 'w') as outfile:
result = expipe.core.convert_to_firebase(result)
json.dump(result, outfile,
sort_keys=True, indent=4)
| gpl-3.0 | -6,103,008,432,294,853,000 | 33.939394 | 80 | 0.611448 | false |
prontodev/stillwithus | stillwithus/clientsites/tests.py | 1 | 6014 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import ClientSite
from servers.models import Server
class ClientSiteTest(TestCase):
def test_create_new_clientsite(self):
clientsite = ClientSite()
clientsite.domain = 'www.atlasperformancechicago.com'
self.assertFalse(clientsite.id)
clientsite.save()
self.assertTrue(clientsite.id)
clientsite = ClientSite.objects.get(id=clientsite.id)
self.assertEqual(clientsite.domain, 'www.atlasperformancechicago.com')
class ClientSiteViewTest(TestCase):
def setUp(self):
self.url = reverse('clientsites')
def test_clientsite_should_be_accessible(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_clientsite_should_use_correct_template(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'clientsites.html')
def test_clientsite_should_have_title(self):
response = self.client.get(self.url)
expected = '<title>Still with Us?</title>'
self.assertContains(response, expected, status_code=200)
def test_clientsite_should_render_html_for_clientsites_correctly(self):
response = self.client.get(self.url)
expected = '<h1>Client Sites</h1>'
self.assertContains(response, expected, status_code=200)
expected = '<thead><tr><th>Domain</th><th>Still with Us?'
expected += '</th><th>Note</th></tr></thead>'
self.assertContains(response, expected, status_code=200)
def test_clientsite_should_query_domains_and_check_if_still_with_us(self):
Server.objects.bulk_create([
Server(name='Pronto 1', ip='54.72.3.133'),
Server(name='Pronto 2', ip='54.72.3.103'),
Server(name='Pronto 3', ip='54.252.146.70'),
Server(name='Pronto 4', ip='54.67.50.151'),
Server(name='Pronto 5', ip='52.1.32.33'),
Server(name='Pronto 6', ip='27.254.65.18'),
Server(name='Pronto 7', ip='54.246.93.4'),
Server(name='Pronto 8', ip='54.228.219.35'),
Server(name='Pronto 9', ip='54.72.3.253'),
Server(name='Pronto 10', ip='54.171.171.172'),
Server(name='Pronto 11', ip='46.137.96.191'),
Server(name='Pronto 12', ip='54.194.28.91'),
Server(name='Pronto 13', ip='54.72.53.55'),
])
ClientSite.objects.bulk_create([
ClientSite(domain='www.prontomarketing.com'),
ClientSite(domain='www.atlasperformancechicago.com'),
])
response = self.client.get(self.url)
expected = '<tr><td><a href="http://www.prontomarketing.com" '
expected += 'target="_blank">www.prontomarketing.com</a></td>'
expected += '<td style="color: red;">No</td><td>---</td></tr>'
self.assertContains(response, expected, count=1, status_code=200)
expected = '<td><a href="http://www.prontomarketing.com" '
expected += 'target="_blank">www.prontomarketing.com</a></td>'
self.assertContains(response, expected, count=1, status_code=200)
expected = '<tr><td><a href="http://www.atlasperformancechicago.com" '
expected += 'target="_blank">www.atlasperformancechicago.com</a></td>'
expected += '<td>Yes</td><td>---</td></tr>'
self.assertContains(response, expected, count=1, status_code=200)
expected = '<td><a href="http://www.atlasperformancechicago.com" '
expected += 'target="_blank">www.atlasperformancechicago.com</a></td>'
self.assertContains(response, expected, count=1, status_code=200)
def test_clientsite_should_add_note_if_cannot_get_ip(self):
ClientSite.objects.create(
domain='dayton.kaiafit.com'
)
response = self.client.get(self.url)
expected = '<tr><td><a href="http://dayton.kaiafit.com" '
expected += 'target="_blank">dayton.kaiafit.com</a></td>'
expected += '<td>---</td><td>Cannot get IP</td></tr>'
self.assertContains(response, expected, count=1, status_code=200)
def test_clientsite_should_render_html_for_servers_correctly(self):
response = self.client.get(self.url)
expected = '<h1>Servers</h1>'
self.assertContains(response, expected, status_code=200)
expected = '<thead><tr><th>Name</th><th>IP</th></tr></thead>'
self.assertContains(response, expected, status_code=200)
def test_clientsite_should_query_server_name_and_ip_correctly(self):
Server.objects.create(
name='AWS ELB',
ip='54.72.3.133'
)
Server.objects.create(
name='Bypronto',
ip='54.171.171.172'
)
response = self.client.get(self.url)
expected = '<tr><td>AWS ELB</td><td>54.72.3.133</td></tr>'
self.assertContains(response, expected, status_code=200)
expected = '<tr><td>Bypronto</td><td>54.171.171.172</td></tr>'
self.assertContains(response, expected, status_code=200)
class ClientSiteAdminTest(TestCase):
def setUp(self):
admin = User.objects.create_superuser(
'admin',
'[email protected]',
'password'
)
self.client.login(
username='admin',
password='password'
)
self.url = '/admin/clientsites/clientsite/'
def test_clientsite_admin_page_should_be_accessible(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_clientsite_admin_page_should_name_and_domain_columns(self):
ClientSite.objects.create(
domain='www.prontomarketing.com'
)
response = self.client.get(self.url)
expected = '<div class="text"><a href="?o=1">Domain</a></div>'
self.assertContains(response, expected, status_code=200)
| mit | -631,185,773,679,272,400 | 37.305732 | 78 | 0.618557 | false |
MMohan1/dwitter | dwitter_app/models.py | 1 | 1342 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
import hashlib
class Dweet(models.Model):
content = models.CharField(max_length=140)
user = models.ForeignKey(User)
creation_date = models.DateTimeField(auto_now=True, blank=True)
class UserProfile(models.Model):
user = models.ForeignKey(User)
# follows = models.ManyToManyField('self', related_name='followed_by', symmetrical=False)
def gravatar_url(self):
return "http://www.gravatar.com/avatar/%s?s=50" % hashlib.md5(self.user.email).hexdigest()
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
class Follow(models.Model):
follower = models.ForeignKey(User, related_name='follower')
following = models.ForeignKey(User, related_name='following')
follow_date = models.DateTimeField(auto_now=True)
class Likes(models.Model):
dwitte = models.ForeignKey(Dweet)
likes = models.ForeignKey(User)
creation_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ("dwitte", "likes")
class Comments(models.Model):
dwitte = models.ForeignKey(Dweet)
comment_by = models.ForeignKey(User)
comment = models.TextField()
creation_date = models.DateTimeField(auto_now=True)
| mit | 5,100,131,809,558,829,000 | 28.822222 | 98 | 0.716095 | false |
opensemanticsearch/open-semantic-etl | src/opensemanticetl/enhance_extract_law.py | 1 | 5028 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import etl_plugin_core
#
# get taxonomy for aggregated facets / filters
#
# example: '§ 153 Abs. 1 Satz 2' -> ['§ 153', '§ 153 Absatz 1', '§ 153 Absatz 1 Satz 2']
# todo: expand a clause into its parent clauses as in the example above; for now only the full clause is returned
def get_taxonomy(law_clause, law_code = None):
law_clauses = [law_clause]
return law_clauses
#1.a
#1(2)
#1 (2)
#
# extract law codes
#
class enhance_extract_law(etl_plugin_core.Plugin):
def process(self, parameters=None, data=None):
if parameters is None:
parameters = {}
if data is None:
data = {}
clause_prefixes = [
'§',
'Article',
'Artikel',
'Art',
'Section',
'Sec',
]
clause_subsections = [
'Abschnitt',
'Absatz',
'Abs',
'Sentence',
'Satz',
'S',
'Halbsatz',
'Number',
'Nummer',
'Nr',
'Buchstabe',
]
text = etl_plugin_core.get_text(data)
clauses = []
rule = '(' + '|'.join(clause_prefixes) + ')\W*((\d+\W\w(\W|\b))|(\d+\w?))(\W?(' + '|'.join(clause_subsections) + ')\W*(\d+\w?|\w(\W|\b)))*'
for match in re.finditer(rule, text, re.IGNORECASE):
clause = match.group(0)
clause = clause.strip()
clauses.append(clause)
# if "§123" normalize to "§ 123"
if clause[0] == '§' and not clause[1] == ' ':
clause = '§ ' + clause[1:]
etl_plugin_core.append(data, 'law_clause_ss', clause)
code_matchtexts = etl_plugin_core.get_all_matchtexts(data.get('law_code_ss_matchtext_ss', []))
code_matchtexts_with_clause = []
preflabels = {}
if 'law_code_ss_preflabel_and_uri_ss' in data:
preflabels = etl_plugin_core.get_preflabels(data['law_code_ss_preflabel_and_uri_ss'])
if len(clauses)>0 and len(code_matchtexts)>0:
text = text.replace("\n", " ")
for code_match_id in code_matchtexts:
#get only matchtext (without ID/URI of matching entity)
for code_matchtext in code_matchtexts[code_match_id]:
for clause in clauses:
if clause + " " + code_matchtext in text or code_matchtext + " " + clause in text:
code_matchtexts_with_clause.append(code_matchtext)
# if "§123" normalize to "§ 123"
if clause[0] == '§' and not clause[1] == ' ':
clause = '§ ' + clause[1:]
law_code_preflabel = code_match_id
if code_match_id in preflabels:
law_code_clause_normalized = clause + " " + preflabels[code_match_id]
else:
law_code_clause_normalized = clause + " " + code_match_id
etl_plugin_core.append(data, 'law_code_clause_ss', law_code_clause_normalized)
if len(code_matchtexts)>0:
blacklist = []
listfile = open('/etc/opensemanticsearch/blacklist/enhance_extract_law/blacklist-lawcode-if-no-clause')
for line in listfile:
line = line.strip()
if line and not line.startswith("#"):
blacklist.append(line)
listfile.close()
if not isinstance(data['law_code_ss_matchtext_ss'], list):
data['law_code_ss_matchtext_ss'] = [data['law_code_ss_matchtext_ss']]
blacklisted_code_ids = []
for code_match_id in code_matchtexts:
for code_matchtext in code_matchtexts[code_match_id]:
if code_matchtext in blacklist:
if code_matchtext not in code_matchtexts_with_clause:
blacklisted_code_ids.append(code_match_id)
data['law_code_ss_matchtext_ss'].remove(code_match_id + "\t" + code_matchtext)
code_matchtexts = etl_plugin_core.get_all_matchtexts(data.get('law_code_ss_matchtext_ss', []))
if not isinstance(data['law_code_ss'], list):
data['law_code_ss'] = [data['law_code_ss']]
if not isinstance(data['law_code_ss_preflabel_and_uri_ss'], list):
data['law_code_ss_preflabel_and_uri_ss'] = [data['law_code_ss_preflabel_and_uri_ss']]
for blacklisted_code_id in blacklisted_code_ids:
if blacklisted_code_id not in code_matchtexts:
data['law_code_ss'].remove(preflabels[blacklisted_code_id])
data['law_code_ss_preflabel_and_uri_ss'].remove(preflabels[blacklisted_code_id] + ' <' + blacklisted_code_id + '>')
return parameters, data
| gpl-3.0 | -8,537,179,740,462,621,000 | 32.657718 | 147 | 0.498504 | false |
irinabov/debian-qpid-dispatch | tests/system_test.py | 1 | 46655 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""System test library, provides tools for tests that start multiple processes,
with special support for qdrouter processes.
Features:
- Create separate directories for each test.
- Save logs, sub-process output, core files etc.
- Automated clean-up after tests: kill sub-processes etc.
- Tools to manipulate qdrouter configuration files.
- Sundry other tools.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import errno, os, time, socket, random, subprocess, shutil, unittest, __main__, re, sys
from datetime import datetime
from subprocess import PIPE, STDOUT
from copy import copy
try:
import queue as Queue # 3.x
except ImportError:
import Queue as Queue # 2.7
from threading import Thread
from threading import Event
import json
import uuid
is_python2 = sys.version_info[0] == 2
# DISPATCH-1443: for python < 2.7 use unittest2 since the default unittest for
# older versions lacks features we need:
#
if is_python2 and sys.version_info[1] < 7:
# python < 2.7:
try:
import unittest2 as unittest
except ImportError:
raise Exception("Python unittest2 not installed - see README")
else:
import unittest
import proton
from proton import Message
from proton import Delivery
from proton.handlers import MessagingHandler
from proton.utils import BlockingConnection
from proton.reactor import AtLeastOnce, Container
from proton.reactor import AtMostOnce
from qpid_dispatch.management.client import Node
from qpid_dispatch_internal.compat import dict_iteritems
# Optional modules
MISSING_MODULES = []
try:
import qpidtoollibs
except ImportError as err:
qpidtoollibs = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
try:
import qpid_messaging as qm
except ImportError as err:
qm = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
def find_exe(program):
"""Find an executable in the system PATH"""
def is_exe(fpath):
"""True if fpath is executable"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
mydir = os.path.split(program)[0]
if mydir:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# The directory where this module lives. Used to locate static configuration files etc.
DIR = os.path.dirname(__file__)
def _check_requirements():
"""If requirements are missing, return a message, else return empty string."""
missing = MISSING_MODULES
required_exes = ['qdrouterd']
missing += ["No exectuable %s"%e for e in required_exes if not find_exe(e)]
if missing:
return "%s: %s"%(__name__, ", ".join(missing))
MISSING_REQUIREMENTS = _check_requirements()
def retry_delay(deadline, delay, max_delay):
"""For internal use in retry. Sleep as required
and return the new delay or None if retry should time out"""
remaining = deadline - time.time()
if remaining <= 0:
return None
time.sleep(min(delay, remaining))
return min(delay*2, max_delay)
# Valgrind significantly slows down the response time of the router, so use a
# long default timeout
TIMEOUT = float(os.environ.get("QPID_SYSTEM_TEST_TIMEOUT", 60))
def retry(function, timeout=TIMEOUT, delay=.001, max_delay=1):
"""Call function until it returns a true value or timeout expires.
Double the delay for each retry up to max_delay.
Returns what function returns or None if timeout expires.
"""
deadline = time.time() + timeout
while True:
ret = function()
if ret:
return ret
else:
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
return None
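# Illustrative example (hypothetical path): wait up to 5 seconds for a file to appear
#   retry(lambda: os.path.exists("/tmp/ready"), timeout=5)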
def retry_exception(function, timeout=TIMEOUT, delay=.001, max_delay=1, exception_test=None):
"""Call function until it returns without exception or timeout expires.
Double the delay for each retry up to max_delay.
Calls exception_test with any exception raised by function, exception_test
may itself raise an exception to terminate the retry.
Returns what function returns if it succeeds before timeout.
Raises last exception raised by function on timeout.
"""
deadline = time.time() + timeout
while True:
try:
return function()
except Exception as e: # pylint: disable=broad-except
if exception_test:
exception_test(e)
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
raise
def get_local_host_socket(protocol_family='IPv4'):
if protocol_family == 'IPv4':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
elif protocol_family == 'IPv6':
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
host = '::1'
return s, host
def port_available(port, protocol_family='IPv4'):
"""Return true if connecting to host:port gives 'connection refused'."""
s, host = get_local_host_socket(protocol_family)
available = False
try:
s.connect((host, port))
except socket.error as e:
available = e.errno == errno.ECONNREFUSED
except:
pass
s.close()
return available
def wait_port(port, protocol_family='IPv4', **retry_kwargs):
"""Wait up to timeout for port (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
def check(e):
"""Only retry on connection refused"""
if not isinstance(e, socket.error) or not e.errno == errno.ECONNREFUSED:
raise
host = None
def connect():
# macOS gives EINVAL for all connection attempts after a ECONNREFUSED
# man 3 connect: "If connect() fails, the state of the socket is unspecified. [...]"
s, host = get_local_host_socket(protocol_family)
try:
s.connect((host, port))
finally:
s.close()
try:
retry_exception(connect, exception_test=check, **retry_kwargs)
except Exception as e:
raise Exception("wait_port timeout on host %s port %s: %s" % (host, port, e))
def wait_ports(ports, **retry_kwargs):
"""Wait up to timeout for all ports (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
for port, protocol_family in dict_iteritems(ports):
wait_port(port=port, protocol_family=protocol_family, **retry_kwargs)
def message(**properties):
"""Convenience to create a proton.Message with properties set"""
m = Message()
for name, value in dict_iteritems(properties):
getattr(m, name) # Raise exception if not a valid message attribute.
setattr(m, name, value)
return m
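# Illustrative example: message(address="amqp://host/queue", body="hi") returns a
# proton.Message with those attributes set; an attribute name that proton.Message
# does not define raises AttributeError.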
class Process(subprocess.Popen):
"""
Popen that can be torn down at the end of a TestCase and stores its output.
"""
# Expected states of a Process at teardown
RUNNING = -1 # Still running
EXIT_OK = 0 # Exit status 0
EXIT_FAIL = 1 # Exit status 1
unique_id = 0
@classmethod
def unique(cls, name):
cls.unique_id += 1
return "%s-%s" % (name, cls.unique_id)
def __init__(self, args, name=None, expect=EXIT_OK, **kwargs):
"""
Takes same arguments as subprocess.Popen. Some additional/special args:
        @param expect: Raise error if process status not as expected at end of test:
            L{RUNNING} - expect still running.
            L{EXIT_OK} - expect process to have terminated with 0 exit status.
            L{EXIT_FAIL} - expect process to have terminated with exit status 1.
integer - expected return code
@keyword stdout: Defaults to the file name+".out"
@keyword stderr: Defaults to be the same as stdout
"""
self.name = name or os.path.basename(args[0])
self.args, self.expect = args, expect
self.outdir = os.getcwd()
self.outfile = os.path.abspath(self.unique(self.name))
self.torndown = False
with open(self.outfile + '.out', 'w') as out:
kwargs.setdefault('stdout', out)
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
super(Process, self).__init__(args, **kwargs)
with open(self.outfile + '.cmd', 'w') as f:
f.write("%s\npid=%s\n" % (' '.join(args), self.pid))
except Exception as e:
raise Exception("subprocess.Popen(%s, %s) failed: %s: %s" %
(args, kwargs, type(e).__name__, e))
def assert_running(self):
"""Assert that the process is still running"""
assert self.poll() is None, "%s: exited" % ' '.join(self.args)
def teardown(self):
"""Check process status and stop the process if necessary"""
if self.torndown:
return
self.torndown = True
def error(msg):
with open(self.outfile + '.out') as f:
raise RuntimeError("Process %s error: %s\n%s\n%s\n>>>>\n%s<<<<" % (
self.pid, msg, ' '.join(self.args),
                    self.outfile + '.cmd', f.read()))
status = self.poll()
if status is None: # Still running
self.terminate()
            if self.expect is not None and self.expect != Process.RUNNING:
error("still running")
self.expect = 0 # Expect clean exit after terminate
status = self.wait()
        if self.expect is not None and self.expect != status:
error("exit code %s, expected %s" % (status, self.expect))
def wait(self, timeout=None):
"""
Add support for a timeout when using Python 2
"""
if timeout is None:
return super(Process, self).wait()
if is_python2:
start = time.time()
while True:
rc = super(Process, self).poll()
if rc is not None:
return rc
if time.time() - start >= timeout:
raise Exception("Process did not terminate")
time.sleep(0.1)
else:
return super(Process, self).wait(timeout=timeout)
def communicate(self, input=None, timeout=None):
"""
Add support for a timeout when using Python 2
"""
if timeout is None:
return super(Process, self).communicate(input=input)
if is_python2:
self.wait(timeout=timeout)
return super(Process, self).communicate(input=input)
return super(Process, self).communicate(input=input,
timeout=timeout)
class Config(object):
"""Base class for configuration objects that provide a convenient
way to create content for configuration files."""
def write(self, name, suffix=".conf"):
"""Write the config object to file name.suffix. Returns name.suffix."""
name = name+suffix
with open(name, 'w') as f:
f.write(str(self))
return name
class Qdrouterd(Process):
"""Run a Qpid Dispatch Router Daemon"""
class Config(list, Config):
"""
List of ('section', {'name':'value', ...}).
Fills in some default values automatically, see Qdrouterd.DEFAULTS
"""
DEFAULTS = {
'listener': {'host':'0.0.0.0', 'saslMechanisms':'ANONYMOUS', 'idleTimeoutSeconds': '120',
'authenticatePeer': 'no', 'role': 'normal'},
'connector': {'host':'127.0.0.1', 'saslMechanisms':'ANONYMOUS', 'idleTimeoutSeconds': '120'},
'router': {'mode': 'standalone', 'id': 'QDR'}
}
def sections(self, name):
"""Return list of sections named name"""
return [p for n, p in self if n == name]
@property
def router_id(self): return self.sections("router")[0]["id"]
def defaults(self):
"""Fill in default values in gconfiguration"""
for name, props in self:
if name in Qdrouterd.Config.DEFAULTS:
for n,p in dict_iteritems(Qdrouterd.Config.DEFAULTS[name]):
props.setdefault(n,p)
def __str__(self):
"""Generate config file content. Calls default() first."""
def tabs(level):
return " " * level
def sub_elem(l, level):
return "".join(["%s%s: {\n%s%s}\n" % (tabs(level), n, props(p, level + 1), tabs(level)) for n, p in l])
def child(v, level):
return "{\n%s%s}" % (sub_elem(v, level), tabs(level - 1))
def props(p, level):
return "".join(
["%s%s: %s\n" % (tabs(level), k, v if not isinstance(v, list) else child(v, level + 1)) for k, v in
dict_iteritems(p)])
self.defaults()
return "".join(["%s {\n%s}\n"%(n, props(p, 1)) for n, p in self])
def __init__(self, name=None, config=Config(), pyinclude=None, wait=True,
perform_teardown=True, cl_args=None, expect=Process.RUNNING):
"""
        @param name: name used for output files, defaults to the id from config.
@param config: router configuration
@keyword wait: wait for router to be ready (call self.wait_ready())
"""
cl_args = cl_args or []
self.config = copy(config)
self.perform_teardown = perform_teardown
if not name: name = self.config.router_id
assert name
# setup log and debug dump files
self.dumpfile = os.path.abspath('%s-qddebug.txt' % name)
self.config.sections('router')[0]['debugDumpFile'] = self.dumpfile
default_log = [l for l in config if (l[0] == 'log' and l[1]['module'] == 'DEFAULT')]
if not default_log:
config.append(
('log', {'module':'DEFAULT', 'enable':'trace+', 'includeSource': 'true', 'outputFile':name+'.log'}))
args = ['qdrouterd', '-c', config.write(name)] + cl_args
env_home = os.environ.get('QPID_DISPATCH_HOME')
if pyinclude:
args += ['-I', pyinclude]
elif env_home:
args += ['-I', os.path.join(env_home, 'python')]
args = os.environ.get('QPID_DISPATCH_RUNNER', '').split() + args
super(Qdrouterd, self).__init__(args, name=name, expect=expect)
self._management = None
self._wait_ready = False
if wait:
self.wait_ready()
@property
def management(self):
"""Return a management agent proxy for this router"""
if not self._management:
self._management = Node.connect(self.addresses[0], timeout=TIMEOUT)
return self._management
def teardown(self):
if self._management:
try:
self._management.close()
except: pass
self._management = None
if not self.perform_teardown:
return
super(Qdrouterd, self).teardown()
# check router's debug dump file for anything interesting (should be
# empty) and dump it to stderr for perusal by organic lifeforms
try:
if os.stat(self.dumpfile).st_size > 0:
with open(self.dumpfile) as f:
sys.stderr.write("\nRouter %s debug dump file:\n" % self.config.router_id)
sys.stderr.write(f.read())
sys.stderr.flush()
except OSError:
# failed to open file. This can happen when an individual test
# spawns a temporary router (i.e. not created as part of the
# TestCase setUpClass method) that gets cleaned up by the test.
pass
@property
def ports_family(self):
"""
Return a dict of listener ports and the respective port family
Example -
{ 23456: 'IPv4', 243455: 'IPv6' }
"""
ports_fam = {}
for l in self.config.sections('listener'):
if l.get('protocolFamily'):
ports_fam[l['port']] = l['protocolFamily']
else:
ports_fam[l['port']] = 'IPv4'
return ports_fam
@property
def ports(self):
"""Return list of configured ports for all listeners"""
return [l['port'] for l in self.config.sections('listener')]
def _cfg_2_host_port(self, c):
host = c['host']
port = c['port']
protocol_family = c.get('protocolFamily', 'IPv4')
if protocol_family == 'IPv6':
return "[%s]:%s" % (host, port)
elif protocol_family == 'IPv4':
return "%s:%s" % (host, port)
raise Exception("Unknown protocol family: %s" % protocol_family)
@property
def addresses(self):
"""Return amqp://host:port addresses for all listeners"""
cfg = self.config.sections('listener')
return ["amqp://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def connector_addresses(self):
"""Return list of amqp://host:port for all connectors"""
cfg = self.config.sections('connector')
return ["amqp://%s" % self._cfg_2_host_port(c) for c in cfg]
@property
def hostports(self):
"""Return host:port for all listeners"""
return [self._cfg_2_host_port(l) for l in self.config.sections('listener')]
def is_connected(self, port, host='127.0.0.1'):
"""If router has a connection to host:port:identity return the management info.
Otherwise return None"""
try:
ret_val = False
response = self.management.query(type="org.apache.qpid.dispatch.connection")
index_host = response.attribute_names.index('host')
for result in response.results:
outs = '%s:%s' % (host, port)
if result[index_host] == outs:
ret_val = True
return ret_val
except:
return False
def wait_address(self, address, subscribers=0, remotes=0, containers=0,
count=1, **retry_kwargs ):
"""
Wait for an address to be visible on the router.
@keyword subscribers: Wait till subscriberCount >= subscribers
@keyword remotes: Wait till remoteCount >= remotes
@keyword containers: Wait till containerCount >= remotes
@keyword count: Wait until >= count matching addresses are found
@param retry_kwargs: keyword args for L{retry}
"""
def check():
# TODO aconway 2014-06-12: this should be a request by name, not a query.
# Need to rationalize addresses in management attributes.
# endswith check is because of M0/L/R prefixes
addrs = self.management.query(
type='org.apache.qpid.dispatch.router.address',
attribute_names=[u'name', u'subscriberCount', u'remoteCount', u'containerCount']).get_entities()
addrs = [a for a in addrs if a['name'].endswith(address)]
return (len(addrs) >= count
and addrs[0]['subscriberCount'] >= subscribers
and addrs[0]['remoteCount'] >= remotes
and addrs[0]['containerCount'] >= containers)
assert retry(check, **retry_kwargs)
def get_host(self, protocol_family):
if protocol_family == 'IPv4':
return '127.0.0.1'
elif protocol_family == 'IPv6':
return '::1'
else:
return '127.0.0.1'
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
def wait_connectors(self, **retry_kwargs):
"""
Wait for all connectors to be connected
@param retry_kwargs: keyword args for L{retry}
"""
for c in self.config.sections('connector'):
assert retry(lambda: self.is_connected(port=c['port'], host=self.get_host(c.get('protocolFamily'))),
**retry_kwargs), "Port not connected %s" % c['port']
def wait_ready(self, **retry_kwargs):
"""Wait for ports and connectors to be ready"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
self.wait_connectors(**retry_kwargs)
return self
def is_router_connected(self, router_id, **retry_kwargs):
try:
self.management.read(identity="router.node/%s" % router_id)
# TODO aconway 2015-01-29: The above check should be enough, we
            # should not advertise a remote router in management till it is fully
# connected. However we still get a race where the router is not
# actually ready for traffic. Investigate.
# Meantime the following actually tests send-thru to the router.
node = Node.connect(self.addresses[0], router_id, timeout=1)
return retry_exception(lambda: node.query('org.apache.qpid.dispatch.router'))
except:
return False
def wait_router_connected(self, router_id, **retry_kwargs):
retry(lambda: self.is_router_connected(router_id), **retry_kwargs)
class Tester(object):
"""Tools for use by TestCase
- Create a directory for the test.
- Utilities to create processes and servers, manage ports etc.
- Clean up processes on teardown"""
# Top level directory above any Tester directories.
# CMake-generated configuration may be found here.
top_dir = os.getcwd()
# The root directory for Tester directories, under top_dir
root_dir = os.path.abspath(__name__+'.dir')
def __init__(self, id):
"""
@param id: module.class.method or False if no directory should be created
"""
self.directory = os.path.join(self.root_dir, *id.split('.')) if id else None
self.cleanup_list = []
def rmtree(self):
"""Remove old test class results directory"""
if self.directory:
shutil.rmtree(os.path.dirname(self.directory), ignore_errors=True)
def setup(self):
"""Called from test setup and class setup."""
if self.directory:
os.makedirs(self.directory)
os.chdir(self.directory)
def teardown(self):
"""Clean up (tear-down, stop or close) objects recorded via cleanup()"""
self.cleanup_list.reverse()
errors = []
for obj in self.cleanup_list:
try:
for method in ["teardown", "tearDown", "stop", "close"]:
cleanup = getattr(obj, method, None)
if cleanup:
cleanup()
break
except Exception as exc:
errors.append(exc)
if errors:
raise RuntimeError("Errors during teardown: \n\n%s" % "\n\n".join([str(e) for e in errors]))
def cleanup(self, x):
"""Record object x for clean-up during tear-down.
        x should have one of the methods teardown, tearDown, stop or close"""
self.cleanup_list.append(x)
return x
def popen(self, *args, **kwargs):
"""Start a Process that will be cleaned up on teardown"""
return self.cleanup(Process(*args, **kwargs))
def qdrouterd(self, *args, **kwargs):
"""Return a Qdrouterd that will be cleaned up on teardown"""
return self.cleanup(Qdrouterd(*args, **kwargs))
port_range = (20000, 30000)
next_port = random.randint(port_range[0], port_range[1])
@classmethod
def get_port(cls, protocol_family='IPv4'):
"""Get an unused port"""
def advance():
"""Advance with wrap-around"""
cls.next_port += 1
if cls.next_port >= cls.port_range[1]:
cls.next_port = cls.port_range[0]
start = cls.next_port
while not port_available(cls.next_port, protocol_family):
advance()
if cls.next_port == start:
raise Exception("No available ports in range %s", cls.port_range)
p = cls.next_port
advance()
return p
class TestCase(unittest.TestCase, Tester): # pylint: disable=too-many-public-methods
"""A TestCase that sets up its own working directory and is also a Tester."""
def __init__(self, test_method):
unittest.TestCase.__init__(self, test_method)
Tester.__init__(self, self.id())
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.tester = Tester('.'.join([cls.__module__, cls.__name__, 'setUpClass']))
cls.tester.rmtree()
cls.tester.setup()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'tester'):
cls.tester.teardown()
del cls.tester
def setUp(self):
# Python < 2.7 will call setUp on the system_test.TestCase class
# itself as well as the subclasses. Ignore that.
if self.__class__ is TestCase: return
# Hack to support setUpClass on older python.
# If the class has not already been set up, do it now.
if not hasattr(self.__class__, 'tester'):
try:
self.setUpClass()
except:
if hasattr(self.__class__, 'tester'):
self.__class__.tester.teardown()
raise
Tester.setup(self)
def tearDown(self):
# Python < 2.7 will call tearDown on the system_test.TestCase class
# itself as well as the subclasses. Ignore that.
if self.__class__ is TestCase: return
Tester.teardown(self)
# Hack to support tearDownClass on older versions of python.
if hasattr(self.__class__, '_tear_down_class'):
self.tearDownClass()
def skipTest(self, reason):
"""Workaround missing unittest.TestCase.skipTest in python 2.6.
The caller must return in order to end the test"""
if hasattr(unittest.TestCase, 'skipTest'):
unittest.TestCase.skipTest(self, reason)
else:
print("Skipping test %s: %s" % (self.id(), reason))
# Hack to support tearDownClass on older versions of python.
# The default TestLoader sorts tests alphabetically so we insert
# a fake tests that will run last to call tearDownClass.
# NOTE: definitely not safe for a parallel test-runner.
if not hasattr(unittest.TestCase, 'tearDownClass'):
def test_zzzz_teardown_class(self):
"""Fake test to call tearDownClass"""
if self.__class__ is not TestCase:
self.__class__._tear_down_class = True
def assert_fair(self, seq):
avg = sum(seq)/len(seq)
for i in seq:
assert i > avg/2, "Work not fairly distributed: %s"%seq
def assertIn(self, item, items):
assert item in items, "%s not in %s" % (item, items)
if not hasattr(unittest.TestCase, 'assertRegexpMatches'):
def assertRegexpMatches(self, text, regexp, msg=None):
"""For python < 2.7: assert re.search(regexp, text)"""
assert re.search(regexp, text), msg or "Can't find %r in '%s'" %(regexp, text)
class SkipIfNeeded(object):
"""
    Decorator class that can be used along with test methods
    to provide skip-test behavior when running under python 2.6
    as well as greater versions.
    This decorator can be used on test methods; a boolean
    condition must be provided (the skip parameter) to define whether
    or not the test will be skipped.
"""
def __init__(self, skip, reason):
"""
:param skip: if True the method wont be called
:param reason: reason why test was skipped
"""
self.skip = skip
self.reason = reason
def __call__(self, f):
def wrap(*args, **kwargs):
"""
Wraps original test method's invocation and dictates whether or
not the test will be executed based on value (boolean) of the
skip parameter.
            When running tests with python < 2.7, if the "skip" parameter is
            true, the original method won't be called. If running python >= 2.7,
            skipTest will be called with the given "reason", ending the test
            before the original method runs.
:param args:
:return:
"""
instance = args[0]
if self.skip:
if sys.version_info < (2, 7):
print("%s -> skipping (python<2.7) [%s] ..." % (f.__name__, self.reason))
return
else:
instance.skipTest(self.reason)
return f(*args, **kwargs)
return wrap
def main_module():
"""
Return the module name of the __main__ module - i.e. the filename with the
path and .py extension stripped. Useful to run the tests in the current file but
using the proper module prefix instead of '__main__', as follows:
if __name__ == '__main__':
unittest.main(module=main_module())
"""
return os.path.splitext(os.path.basename(__main__.__file__))[0]
class AsyncTestReceiver(MessagingHandler):
"""
A simple receiver that runs in the background and queues any received
messages. Messages can be retrieved from this thread via the queue member.
:param wait: block the constructor until the link has been fully
established.
:param recover_link: restart on remote link detach
"""
Empty = Queue.Empty
def __init__(self, address, source, conn_args=None, container_id=None,
wait=True, recover_link=False, msg_args={}):
super(AsyncTestReceiver, self).__init__(**msg_args)
self.address = address
self.source = source
self.conn_args = conn_args
self.queue = Queue.Queue()
self._conn = None
self._container = Container(self)
cid = container_id or "ATR-%s:%s" % (source, uuid.uuid4())
self._container.container_id = cid
self._ready = Event()
self._recover_link = recover_link
self._recover_count = 0
self._stop_thread = False
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._thread.start()
if wait and self._ready.wait(timeout=TIMEOUT) is False:
raise Exception("Timed out waiting for receiver start")
def _main(self):
self._container.timeout = 5.0
self._container.start()
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
        self._thread.join(timeout=timeout)
if self._thread.is_alive():
raise Exception("AsyncTestReceiver did not exit")
def on_start(self, event):
kwargs = {'url': self.address}
if self.conn_args:
kwargs.update(self.conn_args)
self._conn = event.container.connect(**kwargs)
def on_connection_opened(self, event):
kwargs = {'source': self.source}
rcv = event.container.create_receiver(event.connection,
**kwargs)
def on_link_opened(self, event):
self._ready.set()
def on_link_closing(self, event):
event.link.close()
if self._recover_link and not self._stop_thread:
# lesson learned: the generated link name will be the same as the
# old link (which is bad) so we specify a new one
self._recover_count += 1
kwargs = {'source': self.source,
'name': "%s:%s" % (event.link.name, self._recover_count)}
rcv = event.container.create_receiver(event.connection,
**kwargs)
def on_message(self, event):
self.queue.put(event.message)
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
if self._conn:
self._conn.close()
self._conn = None
class AsyncTestSender(MessagingHandler):
"""
A simple sender that runs in the background and sends 'count' messages to a
given target.
"""
class TestSenderException(Exception):
def __init__(self, error=None):
super(AsyncTestSender.TestSenderException, self).__init__(error)
def __init__(self, address, target, count=1, message=None,
container_id=None, presettle=False):
super(AsyncTestSender, self).__init__(auto_accept=False,
auto_settle=False)
self.address = address
self.target = target
self.total = count
self.presettle = presettle
self.accepted = 0
self.released = 0
self.modified = 0
self.rejected = 0
self.sent = 0
self.error = None
self.link_stats = None
self._message = message or Message(body="test")
self._container = Container(self)
cid = container_id or "ATS-%s:%s" % (target, uuid.uuid4())
self._container.container_id = cid
self._link_name = "%s-%s" % (cid, "tx")
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._thread.start()
def _main(self):
self._container.timeout = 5.0
self._container.start()
while self._container.process():
self._check_if_done()
def wait(self):
# don't stop it - wait until everything is sent
self._thread.join(timeout=TIMEOUT)
assert not self._thread.is_alive(), "sender did not complete"
if self.error:
raise AsyncTestSender.TestSenderException(self.error)
def on_start(self, event):
self._conn = self._container.connect(self.address)
def on_connection_opened(self, event):
option = AtMostOnce if self.presettle else AtLeastOnce
self._sender = self._container.create_sender(self._conn,
target=self.target,
options=option(),
name=self._link_name)
def on_sendable(self, event):
if self.sent < self.total:
self._sender.send(self._message)
self.sent += 1
def _check_if_done(self):
done = (self.sent == self.total
and (self.presettle
or (self.accepted + self.released + self.modified
+ self.rejected == self.sent)))
if done and self._conn:
self.link_stats = get_link_info(self._link_name,
self.address)
self._conn.close()
self._conn = None
def on_accepted(self, event):
        self.accepted += 1
event.delivery.settle()
def on_released(self, event):
# for some reason Proton 'helpfully' calls on_released even though the
# delivery state is actually MODIFIED
if event.delivery.remote_state == Delivery.MODIFIED:
return self.on_modified(event)
self.released += 1
event.delivery.settle()
def on_modified(self, event):
self.modified += 1
event.delivery.settle()
def on_rejected(self, event):
self.rejected += 1
event.delivery.settle()
def on_link_error(self, event):
self.error = "link error:%s" % str(event.link.remote_condition)
if self._conn:
self._conn.close()
self._conn = None
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
self.error = "connection to remote dropped"
if self._conn:
self._conn.close()
self._conn = None
class QdManager(object):
"""
A means to invoke qdmanage during a testcase
"""
def __init__(self, tester=None, address=None, timeout=TIMEOUT,
router_id=None,
edge_router_id=None):
# 'tester' - can be 'self' when called in a test,
# or an instance any class derived from Process (like Qdrouterd)
self._tester = tester or Tester(None)
self._timeout = timeout
self._address = address
self.router_id = router_id
self.edge_router_id = edge_router_id
self.router = []
if self.router_id:
self.router = self.router + ['--router', self.router_id]
elif self.edge_router_id:
self.router = self.router + ['--edge-router', self.edge_router_id]
def __call__(self, cmd, address=None, input=None, expect=Process.EXIT_OK,
timeout=None):
assert address or self._address, "address missing"
p = self._tester.popen(
['qdmanage'] + cmd.split(' ')
+ self.router + ['--bus', address or self._address,
'--indent=-1',
'--timeout', str(timeout or self._timeout)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception("%s\n%s" % (e, out))
return out
def create(self, long_type, kwargs):
cmd = "CREATE --type=%s" % long_type
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def update(self, long_type, kwargs, name=None, identity=None):
cmd = 'UPDATE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def delete(self, long_type, name=None, identity=None):
cmd = 'DELETE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
else:
assert False, "name or identity not supplied!"
self(cmd)
def query(self, long_type):
return json.loads(self('QUERY --type=%s' % long_type))
def get_log(self, limit=None):
cmd = 'GET-LOG'
if (limit):
cmd += " limit=%s" % limit
return json.loads(self(cmd))
class MgmtMsgProxy(object):
"""
Utility for creating and inspecting management messages
"""
class _Response(object):
def __init__(self, status_code, status_description, body):
self.status_code = status_code
self.status_description = status_description
if body.__class__ == dict and len(body.keys()) == 2 and 'attributeNames' in body.keys() and 'results' in body.keys():
results = []
names = body['attributeNames']
for result in body['results']:
result_map = {}
for i in range(len(names)):
result_map[names[i]] = result[i]
results.append(MgmtMsgProxy._Response(status_code, status_description, result_map))
self.attrs = {'results': results}
else:
self.attrs = body
def __getattr__(self, key):
return self.attrs[key]
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def response(self, msg):
ap = msg.properties
return self._Response(ap['statusCode'], ap['statusDescription'], msg.body)
def query_router(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_connections(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.connection'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_links(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.link'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_link_routes(self):
ap = {'operation': 'QUERY',
'type': 'org.apache.qpid.dispatch.router.config.linkRoute'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_addresses(self):
ap = {'operation': 'QUERY',
'type': 'org.apache.qpid.dispatch.router.address'}
return Message(properties=ap, reply_to=self.reply_addr)
def create_link_route(self, name, kwargs):
ap = {'operation': 'CREATE',
'type': 'org.apache.qpid.dispatch.router.config.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_link_route(self, name):
ap = {'operation': 'DELETE',
'type': 'org.apache.qpid.dispatch.router.config.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def create_connector(self, name, **kwargs):
ap = {'operation': 'CREATE',
'type': 'org.apache.qpid.dispatch.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_connector(self, name):
ap = {'operation': 'DELETE',
'type': 'org.apache.qpid.dispatch.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def query_conn_link_routes(self):
ap = {'operation': 'QUERY',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute'}
return Message(properties=ap, reply_to=self.reply_addr)
def create_conn_link_route(self, name, kwargs):
ap = {'operation': 'CREATE',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_conn_link_route(self, name):
ap = {'operation': 'DELETE',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def read_conn_link_route(self, name):
ap = {'operation': 'READ',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
class TestTimeout(object):
"""
A callback object for MessagingHandler class
parent: A MessagingHandler with a timeout() method
"""
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.timeout()
class PollTimeout(object):
"""
A callback object for MessagingHandler scheduled timers
parent: A MessagingHandler with a poll_timeout() method
"""
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.poll_timeout()
def get_link_info(name, address):
"""
Query the router at address for the status and statistics of the named link
"""
qdm = QdManager(address=address)
rc = qdm.query('org.apache.qpid.dispatch.router.link')
for item in rc:
if item.get('name') == name:
return item
return None
def has_mobile_dest_in_address_table(address, dest):
qdm = QdManager(address=address)
rc = qdm.query('org.apache.qpid.dispatch.router.address')
has_dest = False
for item in rc:
if dest in item.get("name"):
has_dest = True
break
return has_dest
def get_inter_router_links(address):
"""
Return a list of all links with type="inter-router
:param address:
"""
inter_router_links = []
qdm = QdManager(address=address)
rc = qdm.query('org.apache.qpid.dispatch.router.link')
for item in rc:
if item.get("linkType") == "inter-router":
inter_router_links.append(item)
return inter_router_links
class Timestamp(object):
"""
Time stamps for logging.
"""
def __init__(self):
self.ts = datetime.now()
def __str__(self):
return self.ts.strftime("%Y-%m-%d %H:%M:%S.%f")
class Logger(object):
"""
Record an event log for a self test.
May print per-event or save events to be printed later.
"""
def __init__(self, title="Logger", print_to_console=False, save_for_dump=True):
self.title = title
self.print_to_console = print_to_console
self.save_for_dump = save_for_dump
self.logs = []
def log(self, msg):
ts = Timestamp()
if self.save_for_dump:
self.logs.append( (ts, msg) )
if self.print_to_console:
print("%s %s" % (ts, msg))
sys.stdout.flush()
def dump(self):
print(self)
sys.stdout.flush()
def __str__(self):
lines = []
lines.append(self.title)
for ts, msg in self.logs:
lines.append("%s %s" % (ts, msg))
res = str('\n'.join(lines))
return res
| apache-2.0 | -226,416,414,018,872,100 | 35.649647 | 129 | 0.584075 | false |
LeonidasAntoniou/dk-plus | test files/beacon.py | 1 | 1799 | """
A simple program that sends/listens broadcast packets through UDP socket
Used to test whether the system is able to send/receive packets
"""
import time, math, sys, socket, threading, select, uuid
from collections import namedtuple
import cPickle as pickle
from params import Params
MAX_STAY = 5 #seconds until entry is removed from structure
Geo = namedtuple("Geo", "lat lon")
simple_msg = namedtuple("simple_msg", "ID text")
self_params = Params(dummy=True)
# Set the socket parameters
address = ('192.168.1.255', 54545) # host, port
sock_broad = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock_broad.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Create socket and bind to address
sock_listen = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_listen.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock_listen.bind(address)
def broad():
while True:
#msg = simple_msg(self_id,"I am here")
msg = self_params
assert sock_broad.sendto(pickle.dumps(msg), address), "Message failed to send"
time.sleep(1)
def listen():
print "Waiting for message"
while True:
try:
ready = select.select([sock_listen], [], [], 1.0)
if ready[0]:
d = sock_listen.recvfrom(4096)
raw_msg = d[0]
try:
msg = pickle.loads(raw_msg)
if msg.ID == self_params.ID:
pass
else:
print "From addr: '%s', msg: '%s'" % (d[1], msg)
except Exception, e:
print "Error in receiving: ", e
except socket.timeout:
print "Reached timeout. Closing..."
            # threading.Thread has no cancel(); close the socket and leave the loop
            sock_listen.close()
            return
t_listen = threading.Thread(target=listen)
t_broad = threading.Thread(target=broad)
t_listen.daemon = True
t_broad.daemon = True
t_listen.start()
t_broad.start()
time.sleep(100) #test for 100s
print "Closing beacon"
| gpl-3.0 | -9,053,915,557,494,988,000 | 25.850746 | 81 | 0.699277 | false |